repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
jayhetee/mpld3 | mpld3/mpld3renderer.py | 16 | 10262 | """
mpld3 renderer
==============
This is the renderer class which implements the mplexporter framework for mpld3
"""
__all__ = ["MPLD3Renderer"]
import random
import json
import jinja2
import itertools
import numpy as np
from .mplexporter.utils import color_to_hex
from .mplexporter.exporter import Exporter
from .mplexporter.renderers import Renderer
from .utils import get_id
from .plugins import get_plugins
class MPLD3Renderer(Renderer):
"""Renderer class for mpld3
This renderer class plugs into the ``mplexporter`` package in order to
convert matplotlib figures into a JSON-serializable dictionary
representation which can be read by mpld3.js.
"""
def __init__(self):
self.figure_json = None
self.axes_json = None
self.finished_figures = []
@staticmethod
def datalabel(i):
return "data{0:02d}".format(i)
def add_data(self, data, key="data"):
"""Add a dataset to the current figure
If the dataset matches any already added data, we use that instead.
Parameters
----------
data : array_like
a shape [N,2] array of data
key : string (optional)
the key to use for the data
Returns
-------
datadict : dictionary
datadict has the keys "data", "xindex", "yindex", which will
be passed to the mpld3 JSON object.
"""
# Check if any column of the data exists elsewhere
# If so, we'll use that dataset rather than duplicating it.
data = np.asarray(data)
if data.ndim != 2 or data.shape[1] != 2:
raise ValueError("Data is expected to be of size [N, 2]")
for (i, d) in enumerate(self.datasets):
if data.shape[0] != d.shape[0]:
continue
matches = np.array([np.all(col == d.T, axis=1) for col in data.T])
if not np.any(matches):
continue
# If we get here, we've found a dataset with a matching column
# we'll update this data with additional columns if necessary
new_data = list(self.datasets[i].T)
indices = []
for j in range(data.shape[1]):
whr = np.where(matches[j])[0]
if len(whr):
indices.append(whr[0])
else:
# append a new column to the data
new_data.append(data[:, j])
indices.append(len(new_data) - 1)
self.datasets[i] = np.asarray(new_data).T
datalabel = self.datalabel(i + 1)
xindex, yindex = map(int, indices)
break
else:
# else here can be thought of as "if no break"
# if we get here, then there were no matching datasets
self.datasets.append(data)
datalabel = self.datalabel(len(self.datasets))
xindex = 0
yindex = 1
self.datalabels.append(datalabel)
return {key: datalabel, "xindex": xindex, "yindex": yindex}
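# Illustrative sketch of the deduplication above (hypothetical arrays,
# assuming ``open_figure`` has already reset ``datasets``/``datalabels``):
#
#     renderer.add_data(np.array([[0., 1.], [2., 3.]]))
#     # -> {'data': 'data01', 'xindex': 0, 'yindex': 1}
#     renderer.add_data(np.array([[0., 5.], [2., 6.]]))  # shares column 0
#     # -> {'data': 'data01', 'xindex': 0, 'yindex': 2}  (new y column appended)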
def open_figure(self, fig, props):
self.datasets = []
self.datalabels = []
self.figure_json = dict(width=props['figwidth'] * props['dpi'],
height=props['figheight'] * props['dpi'],
axes=[],
data={},
id=get_id(fig))
def close_figure(self, fig):
additional_css = []
additional_js = []
for i, dataset in enumerate(self.datasets):
datalabel = self.datalabel(i + 1)
self.figure_json['data'][datalabel] = np.asarray(dataset).tolist()
self.figure_json["plugins"] = []
for plugin in get_plugins(fig):
self.figure_json["plugins"].append(plugin.get_dict())
additional_css.append(plugin.css())
additional_js.append(plugin.javascript())
self.finished_figures.append((fig, self.figure_json,
"".join(additional_css),
"".join(additional_js)))
def open_axes(self, ax, props):
self.axes_json = dict(bbox=props['bounds'],
xlim=props['xlim'],
ylim=props['ylim'],
xdomain=props['xdomain'],
ydomain=props['ydomain'],
xscale=props['xscale'],
yscale=props['yscale'],
axes=props['axes'],
axesbg=props['axesbg'],
axesbgalpha=props['axesbgalpha'],
zoomable=bool(props['dynamic']),
id=get_id(ax),
lines=[],
paths=[],
markers=[],
texts=[],
collections=[],
images=[])
self.figure_json['axes'].append(self.axes_json)
# Get shared axes info
xsib = ax.get_shared_x_axes().get_siblings(ax)
ysib = ax.get_shared_y_axes().get_siblings(ax)
self.axes_json['sharex'] = [get_id(axi) for axi in xsib
if axi is not ax]
self.axes_json['sharey'] = [get_id(axi) for axi in ysib
if axi is not ax]
def close_axes(self, ax):
self.axes_json = None
# If draw_line() is not implemented, it will be delegated to draw_path
# Should we get rid of this? There's not really any advantage here
def draw_line(self, data, coordinates, style, label, mplobj=None):
line = self.add_data(data)
line['coordinates'] = coordinates
line['id'] = get_id(mplobj)
for key in ['color', 'linewidth', 'dasharray', 'alpha', 'zorder']:
line[key] = style[key]
# Some browsers do not accept dasharray="10,0"
# This should probably be addressed in mplexporter.
if line['dasharray'] == "10,0":
line['dasharray'] = "none"
self.axes_json['lines'].append(line)
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
path = self.add_data(data)
path['coordinates'] = coordinates
path['pathcodes'] = pathcodes
path['id'] = get_id(mplobj)
if offset is not None:
path['offset'] = list(offset)
path['offsetcoordinates'] = offset_coordinates
for key in ['dasharray', 'alpha', 'facecolor',
'edgecolor', 'edgewidth', 'zorder']:
path[key] = style[key]
# Some browsers do not accept dasharray="10,0"
# This should probably be addressed in mplexporter.
if path['dasharray'] == "10,0":
path['dasharray'] = "none"
self.axes_json['paths'].append(path)
# If draw_markers is not implemented, it will be delegated to draw_path
def draw_markers(self, data, coordinates, style, label, mplobj=None):
markers = self.add_data(data)
markers["coordinates"] = coordinates
markers['id'] = get_id(mplobj, 'pts')
for key in ['facecolor', 'edgecolor', 'edgewidth',
'alpha', 'zorder']:
markers[key] = style[key]
if style.get('markerpath'):
vertices, codes = style['markerpath']
markers['markerpath'] = (vertices.tolist(), codes)
self.axes_json['markers'].append(markers)
# If draw_path_collection is not implemented,
# it will be delegated to draw_path
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
if len(paths) != 0:
styles = dict(alphas=[styles['alpha']],
edgecolors=[color_to_hex(ec)
for ec in styles['edgecolor']],
facecolors=[color_to_hex(fc)
for fc in styles['facecolor']],
edgewidths=styles['linewidth'],
offsetcoordinates=offset_coordinates,
pathcoordinates=path_coordinates,
zorder=styles['zorder'])
pathsdict = self.add_data(offsets, "offsets")
pathsdict['paths'] = [(v.tolist(), p) for (v, p) in paths]
pathsdict['pathtransforms'] = [(t[0, :2].tolist()
+ t[1, :2].tolist()
+ t[2, :2].tolist())
for t in path_transforms]
pathsdict.update(styles)
pathsdict['id'] = get_id(mplobj)
self.axes_json['collections'].append(pathsdict)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
text = dict(text=text,
position=tuple(position),
coordinates=coordinates,
h_anchor=TEXT_HA_DICT[style['halign']],
v_baseline=TEXT_VA_DICT[style['valign']],
rotation=-style['rotation'],
fontsize=style['fontsize'],
color=style['color'],
alpha=style['alpha'],
zorder=style['zorder'],
id=get_id(mplobj))
self.axes_json['texts'].append(text)
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
image = dict(data=imdata, extent=extent, coordinates=coordinates)
image.update(style)
image['id'] = get_id(mplobj)
self.axes_json['images'].append(image)
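# Mapping from matplotlib vertical/horizontal alignment names to the SVG
# dominant-baseline / text-anchor values used by draw_text() above.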
TEXT_VA_DICT = {'bottom': 'auto',
'baseline': 'auto',
'center': 'central',
'top': 'hanging'}
TEXT_HA_DICT = {'left': 'start',
'center': 'middle',
'right': 'end'}
| bsd-3-clause |
sparklingpandas/sparklingpandas | sparklingpandas/pstatcounter.py | 4 | 5444 | """
This module provides statistics for L{PRDD}s.
Look at the stats() method on PRDD for more info.
"""
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparklingpandas.utils import add_pyspark_path
add_pyspark_path()
from pyspark.statcounter import StatCounter
import scipy.stats as scistats
import numpy as np
class PStatCounter(object):
"""
A wrapper around StatCounter which collects stats for multiple columns
"""
def __init__(self, dataframes, columns):
"""
Creates a stats counter for the provided DataFrames
computing the stats for all of the columns in columns.
Parameters
----------
dataframes: list of dataframes, containing the values to compute stats
on.
columns: list of strs, list of columns to compute the stats on.
"""
assert (not isinstance(columns, basestring)), "columns should be a " \
"list of strs, " \
"not a str!"
assert isinstance(columns, list), "columns should be a list!"
self._columns = columns
self._counters = dict((column, StatCounter()) for column in columns)
for df in dataframes:
self.merge(df)
def merge(self, frame):
"""
Add another DataFrame to the PStatCounter.
"""
for column, values in frame.iteritems():
# Temporary hack, fix later
counter = self._counters.get(column)
for value in values:
if counter is not None:
counter.merge(value)
def merge_pstats(self, other):
"""
Merge all of the stats counters of the other PStatCounter with our
counters.
"""
if not isinstance(other, PStatCounter):
raise Exception("Can only merge PStatcounters!")
for column, counter in self._counters.items():
other_counter = other._counters.get(column)
self._counters[column] = counter.mergeStats(other_counter)
return self
def __str__(self):
formatted_str = ""
for column, counter in self._counters.items():
formatted_str += "(field: %s, counters: %s)" % (column, counter)
return formatted_str
def __repr__(self):
return self.__str__()
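# A rough usage sketch (hypothetical pandas DataFrame; pandas is not
# imported in this module, so this is illustration only):
#
#     df = pd.DataFrame({'x': [1.0, 2.0, 3.0], 'y': [4.0, 5.0, 6.0]})
#     counters = PStatCounter(dataframes=[df], columns=['x', 'y'])
#     print(counters)  # "(field: x, counters: ...)(field: y, counters: ...)"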
class ColumnStatCounters(object):
"""
A wrapper around StatCounter which collects stats for multiple columns
"""
def __init__(self, dataframes=None, columns=None):
"""
Creates a stats counter for the provided data frames
computing the stats for all of the columns in columns.
Parameters
----------
dataframes: list of dataframes, containing the values to compute stats
on.
columns: list of strs, list of columns to compute the stats on.
"""
self._column_stats = dict((column_name, StatCounter()) for
column_name in columns)
for single_df in dataframes:
self.merge(single_df)
def merge(self, frame):
"""
Add another DataFrame to the accumulated stats for each column.
Parameters
----------
frame: pandas DataFrame we will update our stats counter with.
"""
for column_name, _ in self._column_stats.items():
data_arr = frame[[column_name]].values
count, min_max_tup, mean, _, _, _ = \
scistats.describe(data_arr)
stats_counter = StatCounter()
stats_counter.n = count
stats_counter.mu = mean
stats_counter.m2 = np.sum((data_arr - mean) ** 2)
stats_counter.minValue, stats_counter.maxValue = min_max_tup
self._column_stats[column_name] = self._column_stats[
column_name].mergeStats(stats_counter)
return self
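# Note: the fields set above follow StatCounter's running-moment form --
# n (count), mu (mean) and m2 (sum of squared deviations from the mean) --
# so, for example, the population variance can be recovered as m2 / n.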
def merge_stats(self, other_col_counters):
"""
Merge statistics from a different column stats counter in to this one.
Parameters
----------
other_col_counters: Other ColumnStatCounters to merge into this one.
"""
for column_name, _ in self._column_stats.items():
self._column_stats[column_name] = self._column_stats[column_name] \
.mergeStats(other_col_counters._column_stats[column_name])
return self
def __str__(self):
formatted_str = ""
for column, counter in self._column_stats.items():
formatted_str += "(field: %s, counters: %s)" % (column, counter)
return formatted_str
def __repr__(self):
return self.__str__()
| apache-2.0 |
DonBeo/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 11 | 23587 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_loss_grad_hess,
_multinomial_loss_grad_hess
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
assert_raises(ValueError, LogisticRegression(C=-1).fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_random_state():
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0)
lr2.fit(X, y)
assert_array_almost_equal(lr1.coef_, lr2.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_loss_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_loss_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_2, grad_2, hess = _logistic_loss_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
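# In other words, the Hessian-vector product satisfies
#     hess(v) ~= d/dt grad(w + t * v) evaluated at t = 0,
# so regressing the centered gradients on t (the lstsq call below) estimates
# that directional derivative; since v is a unit basis vector here, this is
# a single column of the Hessian.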
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
loss_interp_2, grad_interp_2, hess = \
_logistic_loss_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
loss_interp, grad_interp, hess_interp = _logistic_loss_grad_hess(
w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
loss, grad, hess = _logistic_loss_grad_hess(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
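# i.e. loss_with_ones_column(w) = loss_interp(w) + 0.5 * alpha * w[-1] ** 2
# (with alpha = 1 here), because only the explicit ones-column is regularized.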
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use a pre-defined CV fold, since folds generated internally would differ for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test that liblinear fails when a class_weight of type dict is
# provided for a multiclass problem. However, it can handle
# binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=auto
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='auto')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='auto')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_loss_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
_, grad, hessp = _multinomial_loss_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_loss_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_loss_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
| bsd-3-clause |
percyfal/bokeh | examples/app/pivot/main.py | 8 | 27818 | ''' Provide a pivot chart maker example app. Similar to Excel pivot charts,
but with additional ability to explode into multiple charts.
See README.md for more information.
'''
from __future__ import division
import os
import math
import json
import pandas as pd
import collections
import bokeh.io as bio
import bokeh.layouts as bl
import bokeh.models.widgets as bmw
import bokeh.models.sources as bms
import bokeh.models.tools as bmt
import bokeh.plotting as bp
import datetime
import six.moves.urllib.parse as urlp
#Defaults to configure:
PLOT_WIDTH = 300
PLOT_HEIGHT = 300
PLOT_FONT_SIZE = 10
PLOT_AXIS_LABEL_SIZE = 8
PLOT_LABEL_ORIENTATION = 45
OPACITY = 0.8
X_SCALE = 1
Y_SCALE = 1
CIRCLE_SIZE = 9
BAR_WIDTH = 0.5
LINE_WIDTH = 2
COLORS = ['#5e4fa2', '#3288bd', '#66c2a5', '#abdda4', '#e6f598', '#fee08b', '#fdae61', '#f46d43', '#d53e4f', '#9e0142']*1000
C_NORM = "#31AADE"
CHARTTYPES = ['Dot', 'Line', 'Bar', 'Area']
STACKEDTYPES = ['Bar', 'Area']
AGGREGATIONS = ['None', 'Sum']
def get_data(data_source):
'''
Read a csv into a pandas dataframe, and determine which columns of the dataframe
are discrete (strings), continuous (numbers), able to be filtered (aka filterable),
and able to be used as a series (aka seriesable). NA values are filled based on the type of column,
and the dataframe and columns are returned.
Args:
data_source (string): Path to csv file.
Returns:
df_source (pandas dataframe): A dataframe of the csv source, with filled NA values.
cols (dict): Keys are categories of columns of df_source, and values are a list of columns of that category.
'''
df_source = pd.read_csv(data_source)
cols = {}
cols['all'] = df_source.columns.values.tolist()
cols['discrete'] = [x for x in cols['all'] if df_source[x].dtype == object]
cols['continuous'] = [x for x in cols['all'] if x not in cols['discrete']]
cols['filterable'] = cols['discrete']+[x for x in cols['continuous'] if len(df_source[x].unique()) < 100]
cols['seriesable'] = cols['discrete']+[x for x in cols['continuous'] if len(df_source[x].unique()) < 60]
df_source[cols['discrete']] = df_source[cols['discrete']].fillna('{BLANK}')
df_source[cols['continuous']] = df_source[cols['continuous']].fillna(0)
return (df_source, cols)
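# Sketch of the returned structure for a hypothetical csv with columns
# 'Year' (int), 'Tech' (str) and 'Gen' (float):
#
#     df, cols = get_data('example.csv')
#     # cols['discrete']   -> ['Tech']
#     # cols['continuous'] -> ['Year', 'Gen']
#     # cols['filterable'] / cols['seriesable'] also include a continuous
#     # column only if it has fewer than 100 / 60 unique values respectively.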
def build_widgets(df_source, cols, defaults, init_load=False, init_config={}):
'''
Use a dataframe and its columns to set widget options. Widget values may
be set by URL parameters via init_config.
Args:
df_source (pandas dataframe): Dataframe of the csv source.
cols (dict): Keys are categories of columns of df_source, and values are a list of columns of that category.
defaults (dict): Keys correspond to widgets, and values (str) are the default values of those widgets.
init_load (boolean, optional): If this is the initial page load, then this will be True, else False.
init_config (dict): Initial widget configuration passed via URL.
Returns:
wdg (ordered dict): Dictionary of bokeh.model.widgets.
'''
#Add widgets
wdg = collections.OrderedDict()
wdg['data'] = bmw.TextInput(title='Data Source (required)', value=defaults['data_source'], css_classes=['wdgkey-data'])
wdg['x_dropdown'] = bmw.Div(text='X-Axis (required)', css_classes=['x-dropdown'])
wdg['x'] = bmw.Select(title='X-Axis (required)', value=defaults['x'], options=['None'] + cols['all'], css_classes=['wdgkey-x', 'x-drop'])
wdg['x_group'] = bmw.Select(title='Group X-Axis By', value=defaults['x_group'], options=['None'] + cols['seriesable'], css_classes=['wdgkey-x_group', 'x-drop'])
wdg['y_dropdown'] = bmw.Div(text='Y-Axis (required)', css_classes=['y-dropdown'])
wdg['y'] = bmw.Select(title='Y-Axis (required)', value=defaults['y'], options=['None'] + cols['all'], css_classes=['wdgkey-y', 'y-drop'])
wdg['y_agg'] = bmw.Select(title='Y-Axis Aggregation', value='Sum', options=AGGREGATIONS, css_classes=['wdgkey-y_agg', 'y-drop'])
wdg['series_dropdown'] = bmw.Div(text='Series', css_classes=['series-dropdown'])
wdg['series'] = bmw.Select(title='Separate Series By', value=defaults['series'], options=['None'] + cols['seriesable'],
css_classes=['wdgkey-series', 'series-drop'])
wdg['series_legend'] = bmw.Div(text='', css_classes=['series-drop'])
wdg['explode_dropdown'] = bmw.Div(text='Explode', css_classes=['explode-dropdown'])
wdg['explode'] = bmw.Select(title='Explode By', value=defaults['explode'], options=['None'] + cols['seriesable'], css_classes=['wdgkey-explode', 'explode-drop'])
wdg['explode_group'] = bmw.Select(title='Group Exploded Charts By', value=defaults['explode_group'], options=['None'] + cols['seriesable'],
css_classes=['wdgkey-explode_group', 'explode-drop'])
wdg['filters'] = bmw.Div(text='Filters', css_classes=['filters-dropdown'])
for j, col in enumerate(cols['filterable']):
val_list = [str(i) for i in sorted(df_source[col].unique().tolist())]
wdg['heading_filter_'+str(j)] = bmw.Div(text=col, css_classes=['filter-head'])
wdg['filter_'+str(j)] = bmw.CheckboxGroup(labels=val_list, active=list(range(len(val_list))), css_classes=['wdgkey-filter_'+str(j), 'filter'])
wdg['update'] = bmw.Button(label='Update Filters', button_type='success', css_classes=['filters-update'])
wdg['adjustments'] = bmw.Div(text='Plot Adjustments', css_classes=['adjust-dropdown'])
wdg['chart_type'] = bmw.Select(title='Chart Type', value=defaults['chart_type'], options=CHARTTYPES, css_classes=['wdgkey-chart_type', 'adjust-drop'])
wdg['plot_width'] = bmw.TextInput(title='Plot Width (px)', value=str(PLOT_WIDTH), css_classes=['wdgkey-plot_width', 'adjust-drop'])
wdg['plot_height'] = bmw.TextInput(title='Plot Height (px)', value=str(PLOT_HEIGHT), css_classes=['wdgkey-plot_height', 'adjust-drop'])
wdg['plot_title'] = bmw.TextInput(title='Plot Title', value='', css_classes=['wdgkey-plot_title', 'adjust-drop'])
wdg['plot_title_size'] = bmw.TextInput(title='Plot Title Font Size', value=str(PLOT_FONT_SIZE), css_classes=['wdgkey-plot_title_size', 'adjust-drop'])
wdg['opacity'] = bmw.TextInput(title='Opacity (0-1)', value=str(OPACITY), css_classes=['wdgkey-opacity', 'adjust-drop'])
wdg['x_scale'] = bmw.TextInput(title='X Scale', value=str(X_SCALE), css_classes=['wdgkey-x_scale', 'adjust-drop'])
wdg['x_min'] = bmw.TextInput(title='X Min', value='', css_classes=['wdgkey-x_min', 'adjust-drop'])
wdg['x_max'] = bmw.TextInput(title='X Max', value='', css_classes=['wdgkey-x_max', 'adjust-drop'])
wdg['x_title'] = bmw.TextInput(title='X Title', value='', css_classes=['wdgkey-x_title', 'adjust-drop'])
wdg['x_title_size'] = bmw.TextInput(title='X Title Font Size', value=str(PLOT_FONT_SIZE), css_classes=['wdgkey-x_title_size', 'adjust-drop'])
wdg['x_major_label_size'] = bmw.TextInput(title='X Labels Font Size', value=str(PLOT_AXIS_LABEL_SIZE), css_classes=['wdgkey-x_major_label_size', 'adjust-drop'])
wdg['x_major_label_orientation'] = bmw.TextInput(title='X Labels Degrees', value=str(PLOT_LABEL_ORIENTATION),
css_classes=['wdgkey-x_major_label_orientation', 'adjust-drop'])
wdg['y_scale'] = bmw.TextInput(title='Y Scale', value=str(Y_SCALE), css_classes=['wdgkey-y_scale', 'adjust-drop'])
wdg['y_min'] = bmw.TextInput(title='Y Min', value='', css_classes=['wdgkey-y_min', 'adjust-drop'])
wdg['y_max'] = bmw.TextInput(title='Y Max', value='', css_classes=['wdgkey-y_max', 'adjust-drop'])
wdg['y_title'] = bmw.TextInput(title='Y Title', value='', css_classes=['wdgkey-y_title', 'adjust-drop'])
wdg['y_title_size'] = bmw.TextInput(title='Y Title Font Size', value=str(PLOT_FONT_SIZE), css_classes=['wdgkey-y_title_size', 'adjust-drop'])
wdg['y_major_label_size'] = bmw.TextInput(title='Y Labels Font Size', value=str(PLOT_AXIS_LABEL_SIZE), css_classes=['wdgkey-y_major_label_size', 'adjust-drop'])
wdg['circle_size'] = bmw.TextInput(title='Circle Size (Dot Only)', value=str(CIRCLE_SIZE), css_classes=['wdgkey-circle_size', 'adjust-drop'])
wdg['bar_width'] = bmw.TextInput(title='Bar Width (Bar Only)', value=str(BAR_WIDTH), css_classes=['wdgkey-bar_width', 'adjust-drop'])
wdg['line_width'] = bmw.TextInput(title='Line Width (Line Only)', value=str(LINE_WIDTH), css_classes=['wdgkey-line_width', 'adjust-drop'])
wdg['download'] = bmw.Button(label='Download csv', button_type='success')
wdg['export_config'] = bmw.Div(text='Export Config to URL', css_classes=['export-config', 'bk-bs-btn', 'bk-bs-btn-success'])
#use init_config (from 'widgets' parameter in URL query string) to configure widgets.
if init_load:
for key in init_config:
if key in wdg:
if hasattr(wdg[key], 'value'):
wdg[key].value = str(init_config[key])
elif hasattr(wdg[key], 'active'):
wdg[key].active = init_config[key]
#Add update functions for widgets
wdg['data'].on_change('value', update_data)
wdg['update'].on_click(update_plots)
wdg['download'].on_click(download)
for name in wdg_col:
wdg[name].on_change('value', update_wdg_col)
for name in wdg_non_col:
wdg[name].on_change('value', update_wdg)
return wdg
def set_df_plots(df_source, cols, wdg):
'''
Apply filters, scaling, aggregation, and sorting to source dataframe, and return the result.
Args:
df_source (pandas dataframe): Dataframe of the csv source.
cols (dict): Keys are categories of columns of df_source, and values are a list of columns of that category.
wdg (ordered dict): Dictionary of bokeh model widgets.
Returns:
df_plots (pandas dataframe): df_source after having been filtered, scaled, aggregated, and sorted.
'''
df_plots = df_source.copy()
#Apply filters
for j, col in enumerate(cols['filterable']):
active = [wdg['filter_'+str(j)].labels[i] for i in wdg['filter_'+str(j)].active]
if col in cols['continuous']:
active = [float(i) for i in active]
df_plots = df_plots[df_plots[col].isin(active)]
#Scale Axes
if wdg['x_scale'].value != '' and wdg['x'].value in cols['continuous']:
df_plots[wdg['x'].value] = df_plots[wdg['x'].value] * float(wdg['x_scale'].value)
if wdg['y_scale'].value != '' and wdg['y'].value in cols['continuous']:
df_plots[wdg['y'].value] = df_plots[wdg['y'].value] * float(wdg['y_scale'].value)
#Apply Aggregation
if wdg['y_agg'].value == 'Sum' and wdg['y'].value in cols['continuous']:
groupby_cols = [wdg['x'].value]
if wdg['x_group'].value != 'None': groupby_cols = [wdg['x_group'].value] + groupby_cols
if wdg['series'].value != 'None': groupby_cols = [wdg['series'].value] + groupby_cols
if wdg['explode'].value != 'None': groupby_cols = [wdg['explode'].value] + groupby_cols
if wdg['explode_group'].value != 'None': groupby_cols = [wdg['explode_group'].value] + groupby_cols
df_plots = df_plots.groupby(groupby_cols, as_index=False, sort=False)[wdg['y'].value].sum()
#Sort Dataframe
sortby_cols = [wdg['x'].value]
if wdg['x_group'].value != 'None': sortby_cols = [wdg['x_group'].value] + sortby_cols
if wdg['series'].value != 'None': sortby_cols = [wdg['series'].value] + sortby_cols
if wdg['explode'].value != 'None': sortby_cols = [wdg['explode'].value] + sortby_cols
if wdg['explode_group'].value != 'None': sortby_cols = [wdg['explode_group'].value] + sortby_cols
df_plots = df_plots.sort_values(sortby_cols).reset_index(drop=True)
#Rearrange column order for csv download
unsorted_columns = [col for col in df_plots.columns if col not in sortby_cols + [wdg['y'].value]]
df_plots = df_plots[sortby_cols + unsorted_columns + [wdg['y'].value]]
return df_plots
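# For example (hypothetical widget values): with x='Year', series='Tech',
# y='Gen' and y_agg='Sum', the groupby above uses ['Tech', 'Year'], producing
# one summed 'Gen' value per (Tech, Year) pair, sorted by those same columns.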
def create_figures(df_plots, wdg, cols):
'''
Create figures based on the data in a dataframe and widget configuration, and return figures in a list.
The explode widget determines if there will be multiple figures.
Args:
df_plots (pandas dataframe): Dataframe of csv source after being filtered, scaled, aggregated, and sorted.
wdg (ordered dict): Dictionary of bokeh model widgets.
cols (dict): Keys are categories of columns of df_source, and values are a list of columns of that category.
Returns:
plot_list (list): List of bokeh.model.figures.
'''
plot_list = []
df_plots_cp = df_plots.copy()
if wdg['explode'].value == 'None':
plot_list.append(create_figure(df_plots_cp, df_plots, wdg, cols))
else:
if wdg['explode_group'].value == 'None':
for explode_val in df_plots_cp[wdg['explode'].value].unique().tolist():
df_exploded = df_plots_cp[df_plots_cp[wdg['explode'].value].isin([explode_val])]
plot_list.append(create_figure(df_exploded, df_plots, wdg, cols, explode_val))
else:
for explode_group in df_plots_cp[wdg['explode_group'].value].unique().tolist():
df_exploded_group = df_plots_cp[df_plots_cp[wdg['explode_group'].value].isin([explode_group])]
for explode_val in df_exploded_group[wdg['explode'].value].unique().tolist():
df_exploded = df_exploded_group[df_exploded_group[wdg['explode'].value].isin([explode_val])]
plot_list.append(create_figure(df_exploded, df_plots, wdg, cols, explode_val, explode_group))
return plot_list
def create_figure(df_exploded, df_plots, wdg, cols, explode_val=None, explode_group=None):
'''
Create and return a figure based on the data in a dataframe and widget configuration.
Args:
df_exploded (pandas dataframe): Dataframe of just the data that will be plotted in this figure.
df_plots (pandas dataframe): Dataframe of all plots data, used only for maintaining consistent series colors.
wdg (ordered dict): Dictionary of bokeh model widgets.
cols (dict): Keys are categories of columns of df_source, and values are a list of columns of that category.
explode_val (string, optional): The value in the column designated by wdg['explode'] that applies to this figure.
explode_group (string, optional): The value in the wdg['explode_group'] column that applies to this figure.
Returns:
p (bokeh.model.figure): A figure, with all glyphs added by the add_glyph() function.
'''
# If x_group has a value, create a combined column in the dataframe for x and x_group
x_col = wdg['x'].value
if wdg['x_group'].value != 'None':
x_col = str(wdg['x_group'].value) + '_' + str(wdg['x'].value)
df_exploded[x_col] = df_exploded[wdg['x_group'].value].map(str) + ' ' + df_exploded[wdg['x'].value].map(str)
#Build x and y ranges and figure title
kw = dict()
#Set x and y ranges. When x is grouped, there is the added complication of separating the groups
xs = df_exploded[x_col].values.tolist()
ys = df_exploded[wdg['y'].value].values.tolist()
if wdg['x_group'].value != 'None':
kw['x_range'] = []
unique_groups = df_exploded[wdg['x_group'].value].unique().tolist()
unique_xs = df_exploded[wdg['x'].value].unique().tolist()
for i, ugr in enumerate(unique_groups):
for uxs in unique_xs:
kw['x_range'].append(str(ugr) + ' ' + str(uxs))
#Between groups, add entries that consist of spaces. Increase number of spaces from
#one break to the next so that each entry is unique
kw['x_range'].append(' ' * (i + 1))
elif wdg['x'].value in cols['discrete']:
kw['x_range'] = sorted(set(xs))
if wdg['y'].value in cols['discrete']:
kw['y_range'] = sorted(set(ys))
#Set figure title
kw['title'] = wdg['plot_title'].value
separator = '' if kw['title'] == '' else ', '
if explode_val is not None:
if explode_group is not None:
kw['title'] = kw['title'] + separator + "%s = %s" % (wdg['explode_group'].value, str(explode_group))
separator = '' if kw['title'] == '' else ', '
kw['title'] = kw['title'] + separator + "%s = %s" % (wdg['explode'].value, str(explode_val))
#Add figure tools
hover = bmt.HoverTool(
tooltips=[
("ser", "@ser_legend"),
("x", "@x_legend"),
("y", "@y_legend"),
]
)
TOOLS = [bmt.BoxZoomTool(), bmt.PanTool(), hover, bmt.ResetTool(), bmt.SaveTool()]
#Create figure with the ranges, titles, and tools, and adjust formatting and labels
p = bp.figure(plot_height=int(wdg['plot_height'].value), plot_width=int(wdg['plot_width'].value), tools=TOOLS, **kw)
p.toolbar.active_drag = TOOLS[0]
p.title.text_font_size = wdg['plot_title_size'].value + 'pt'
p.xaxis.axis_label = wdg['x_title'].value
p.yaxis.axis_label = wdg['y_title'].value
p.xaxis.axis_label_text_font_size = wdg['x_title_size'].value + 'pt'
p.yaxis.axis_label_text_font_size = wdg['y_title_size'].value + 'pt'
p.xaxis.major_label_text_font_size = wdg['x_major_label_size'].value + 'pt'
p.yaxis.major_label_text_font_size = wdg['y_major_label_size'].value + 'pt'
p.xaxis.major_label_orientation = 'horizontal' if wdg['x_major_label_orientation'].value == '0' else math.radians(float(wdg['x_major_label_orientation'].value))
if wdg['x'].value in cols['continuous']:
if wdg['x_min'].value != '': p.x_range.start = float(wdg['x_min'].value)
if wdg['x_max'].value != '': p.x_range.end = float(wdg['x_max'].value)
if wdg['y'].value in cols['continuous']:
if wdg['y_min'].value != '': p.y_range.start = float(wdg['y_min'].value)
if wdg['y_max'].value != '': p.y_range.end = float(wdg['y_max'].value)
#Add glyphs to figure
c = C_NORM
if wdg['series'].value == 'None':
if wdg['y_agg'].value != 'None' and wdg['y'].value in cols['continuous']:
xs = df_exploded[x_col].values.tolist()
ys = df_exploded[wdg['y'].value].values.tolist()
add_glyph(wdg, p, xs, ys, c)
else:
full_series = df_plots[wdg['series'].value].unique().tolist() #for colors only
if wdg['chart_type'].value in STACKEDTYPES: #We are stacking the series
xs_full = sorted(df_exploded[x_col].unique().tolist())
y_bases_pos = [0]*len(xs_full)
y_bases_neg = [0]*len(xs_full)
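# Stacking sketch (hypothetical values): for xs_full = ['a', 'b'] with
# series A ys = [1, -2] and series B ys = [3, -1], the positive bases grow
# [0, 0] -> [1, 0] -> [4, 0] and the negative bases [0, 0] -> [0, -2] -> [0, -3];
# each glyph spans from its running base to the new cumulative total.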
for i, ser in enumerate(df_exploded[wdg['series'].value].unique().tolist()):
c = COLORS[full_series.index(ser)]
df_series = df_exploded[df_exploded[wdg['series'].value].isin([ser])]
xs_ser = df_series[x_col].values.tolist()
ys_ser = df_series[wdg['y'].value].values.tolist()
if wdg['chart_type'].value not in STACKEDTYPES: #The series will not be stacked
add_glyph(wdg, p, xs_ser, ys_ser, c, series=ser)
else: #We are stacking the series
ys_pos = [ys_ser[xs_ser.index(x)] if x in xs_ser and ys_ser[xs_ser.index(x)] > 0 else 0 for i, x in enumerate(xs_full)]
ys_neg = [ys_ser[xs_ser.index(x)] if x in xs_ser and ys_ser[xs_ser.index(x)] < 0 else 0 for i, x in enumerate(xs_full)]
ys_stacked_pos = [ys_pos[i] + y_bases_pos[i] for i in range(len(xs_full))]
ys_stacked_neg = [ys_neg[i] + y_bases_neg[i] for i in range(len(xs_full))]
add_glyph(wdg, p, xs_full, ys_stacked_pos, c, y_bases=y_bases_pos, series=ser)
add_glyph(wdg, p, xs_full, ys_stacked_neg, c, y_bases=y_bases_neg, series=ser)
y_bases_pos = ys_stacked_pos
y_bases_neg = ys_stacked_neg
return p
def add_glyph(wdg, p, xs, ys, c, y_bases=None, series=None):
'''
Add a glyph to a Bokeh figure, depending on the chosen chart type.
Args:
wdg (ordered dict): Dictionary of bokeh model widgets.
p (bokeh.model.figure): Bokeh figure.
xs (list): List of x-values. These could be numeric or strings.
ys (list): List of y-values. These could be numeric or strings. If series data is stacked, these values include stacking.
c (string): Color to use for this series.
y_bases (list, optional): Only used when stacking series. This is the previous cumulative stacking level.
series (string): Name of current series for this glyph.
Returns:
Nothing.
'''
alpha = float(wdg['opacity'].value)
y_unstacked = list(ys) if y_bases is None else [ys[i] - y_bases[i] for i in range(len(ys))]
ser = ['None']*len(xs) if series is None else [series]*len(xs)
if wdg['chart_type'].value == 'Dot':
source = bms.ColumnDataSource({'x': xs, 'y': ys, 'x_legend': xs, 'y_legend': y_unstacked, 'ser_legend': ser})
p.circle('x', 'y', source=source, color=c, size=int(wdg['circle_size'].value), fill_alpha=alpha, line_color=None, line_width=None)
elif wdg['chart_type'].value == 'Line':
source = bms.ColumnDataSource({'x': xs, 'y': ys, 'x_legend': xs, 'y_legend': y_unstacked, 'ser_legend': ser})
p.line('x', 'y', source=source, color=c, alpha=alpha, line_width=float(wdg['line_width'].value))
elif wdg['chart_type'].value == 'Bar':
if y_bases is None: y_bases = [0]*len(ys)
centers = [(ys[i] + y_bases[i])/2 for i in range(len(ys))]
heights = [abs(ys[i] - y_bases[i]) for i in range(len(ys))]
source = bms.ColumnDataSource({'x': xs, 'y': centers, 'x_legend': xs, 'y_legend': y_unstacked, 'h': heights, 'ser_legend': ser})
p.rect('x', 'y', source=source, height='h', color=c, fill_alpha=alpha, width=float(wdg['bar_width'].value), line_color=None, line_width=None)
elif wdg['chart_type'].value == 'Area':
if y_bases is None: y_bases = [0]*len(ys)
xs_around = xs + xs[::-1]
ys_around = y_bases + ys[::-1]
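# e.g. xs = [1, 2], ys = [3, 4], y_bases = [0, 0] traces the closed patch
# (1, 0) -> (2, 0) -> (2, 4) -> (1, 3): forward along the bases, back along the tops.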
source = bms.ColumnDataSource({'x': xs_around, 'y': ys_around})
p.patch('x', 'y', source=source, alpha=alpha, fill_color=c, line_color=None, line_width=None)
def build_series_legend(df_plots, series_val):
'''
Return html for series legend, based on values of column that was chosen for series, and global COLORS.
Args:
df_plots (pandas dataframe): Dataframe of all plots data.
series_val (string): Header for column chosen as series.
Returns:
series_legend_string (string): html to be used as legend.
'''
series_legend_string = '<div class="legend-header">Series Legend</div><div class="legend-body">'
if series_val != 'None':
active_list = df_plots[series_val].unique().tolist()
for i, txt in reversed(list(enumerate(active_list))):
series_legend_string += '<div class="legend-entry"><span class="legend-color" style="background-color:' + str(COLORS[i]) + ';"></span>'
series_legend_string += '<span class="legend-text">' + str(txt) +'</span></div>'
series_legend_string += '</div>'
return series_legend_string
def update_data(attr, old, new):
'''
When data source is updated, rebuild widgets and plots.
'''
defaults['data_source'] = gl['widgets']['data'].value
for w in wdg_col:
defaults[w] = 'None'
defaults['chart_type'] = 'Dot'
gl['df_source'], gl['columns'] = get_data(defaults['data_source'])
gl['widgets'] = build_widgets(gl['df_source'], gl['columns'], defaults)
gl['controls'].children = list(gl['widgets'].values())
gl['plots'].children = []
def update_wdg(attr, old, new):
'''
When general widgets are updated (not in wdg_col), update plots only.
'''
update_plots()
def update_wdg_col(attr, old, new):
'''
When widgets in wdg_col are updated, set the options of all wdg_col widgets,
and update plots.
'''
set_wdg_col_options()
update_plots()
def set_wdg_col_options():
'''
Limit available options for wdg_col widgets based on their selected values, so that users
cannot select the same value for two different wdg_col widgets.
'''
cols = gl['columns']
wdg = gl['widgets']
#get list of selected values and use to reduce selection options.
sels = [str(wdg[w].value) for w in wdg_col if str(wdg[w].value) !='None']
all_reduced = [x for x in cols['all'] if x not in sels]
ser_reduced = [x for x in cols['seriesable'] if x not in sels]
for w in wdg_col:
val = str(wdg[w].value)
none_append = [] if val == 'None' else ['None']
opt_reduced = all_reduced if w in wdg_col_all else ser_reduced
wdg[w].options = [val] + opt_reduced + none_append
def update_plots():
'''
Make sure x axis and y axis are set. If so, set the dataframe for the plots and build them.
'''
if gl['widgets']['x'].value == 'None' or gl['widgets']['y'].value == 'None':
gl['plots'].children = []
return
gl['df_plots'] = set_df_plots(gl['df_source'], gl['columns'], gl['widgets'])
gl['widgets']['series_legend'].text = build_series_legend(gl['df_plots'], gl['widgets']['series'].value)
gl['plots'].children = create_figures(gl['df_plots'], gl['widgets'], gl['columns'])
def download():
'''
Download a csv file of the currently viewed data to the downloads/ directory,
with the current timestamp.
'''
gl['df_plots'].to_csv(os.path.dirname(os.path.realpath(__file__)) + '/downloads/out '+
datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S-%f")+'.csv', index=False)
#initialize globals dict
gl = {'df_source':None, 'df_plots':None, 'columns':None, 'widgets':None, 'controls': None, 'plots':None}
#List of widgets that use columns as their selectors
wdg_col_all = ['x', 'y'] #all columns available for these widgets
wdg_col_ser = ['x_group', 'series', 'explode', 'explode_group'] #seriesable columns available for these widgets
wdg_col = wdg_col_all + wdg_col_ser
#List of widgets that don't use columns as selector and share general widget update function
wdg_non_col = ['chart_type', 'y_agg', 'plot_title', 'plot_title_size',
'plot_width', 'plot_height', 'opacity', 'x_min', 'x_max', 'x_scale', 'x_title',
'x_title_size', 'x_major_label_size', 'x_major_label_orientation',
'y_min', 'y_max', 'y_scale', 'y_title', 'y_title_size', 'y_major_label_size',
'circle_size', 'bar_width', 'line_width']
#Specify default widget values
defaults = {}
defaults['data_source'] = os.path.dirname(os.path.realpath(__file__)) + '/csv/US_electric_power_generation.csv'
for w in wdg_col:
defaults[w] = 'None'
defaults['x'] = 'Year'
defaults['y'] = 'Electricity Generation (TWh)'
defaults['series'] = 'Technology'
defaults['explode'] = 'Case'
defaults['chart_type'] = 'Area'
#On initial load, read 'widgets' parameter from URL query string and use to set data source (data_source)
#and widget configuration object (wdg_config)
wdg_config = {}
args = bio.curdoc().session_context.request.arguments
wdg_arr = args.get('widgets')
if wdg_arr is not None:
wdg_config = json.loads(urlp.unquote(wdg_arr[0].decode('utf-8')))
if 'data' in wdg_config:
defaults['data_source'] = str(wdg_config['data'])
for w in wdg_col:
defaults[w] = 'None'
#build widgets and plots
gl['df_source'], gl['columns'] = get_data(defaults['data_source'])
gl['widgets'] = build_widgets(gl['df_source'], gl['columns'], defaults, init_load=True, init_config=wdg_config)
set_wdg_col_options()
gl['controls'] = bl.widgetbox(list(gl['widgets'].values()), id='widgets_section')
gl['plots'] = bl.column([], id='plots_section')
update_plots()
layout = bl.row(gl['controls'], gl['plots'], id='layout')
bio.curdoc().add_root(layout)
bio.curdoc().title = "Exploding Pivot Chart Maker"
| bsd-3-clause |
peterbraden/tensorflow | tensorflow/contrib/learn/python/learn/tests/test_early_stopping.py | 5 | 2501 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
class EarlyStoppingTest(tf.test.TestCase):
def testIrisES(self):
random.seed(42)
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=0.2)
val_monitor = learn.monitors.ValidationMonitor(X_val, y_val, n_classes=3)
# classifier without early stopping - overfitting
classifier1 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
steps=1000)
classifier1.fit(X_train, y_train)
score1 = accuracy_score(y_test, classifier1.predict(X_test))
# classifier with early stopping - improved accuracy on testing set
classifier2 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
steps=1000)
classifier2.fit(X_train, y_train, val_monitor)
score2 = accuracy_score(y_test, classifier2.predict(X_test))
# self.assertGreater(score2, score1, "No improvement using early stopping.")
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
pdamodaran/yellowbrick | yellowbrick/features/importances.py | 1 | 13346 | # yellowbrick.features.importances
# Feature importance visualizer
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Fri Mar 02 15:21:36 2018 -0500
# Author: Rebecca Bilbro <[email protected]>
# Updated: Sun Jun 24 10:53:36 2018 -0500
#
# Copyright (C) 2018 District Data Labs
# For license information, see LICENSE.txt
#
# ID: importances.py [] [email protected] $
"""
Implementation of a feature importances visualizer. This visualizer sits in
kind of a weird place since it is technically a model scoring visualizer, but
is generally used for feature engineering.
"""
##########################################################################
## Imports
##########################################################################
import warnings
import numpy as np
import matplotlib.pyplot as plt
from yellowbrick.base import ModelVisualizer
from yellowbrick.utils import is_dataframe, is_classifier
from yellowbrick.exceptions import YellowbrickTypeError, NotFitted, YellowbrickWarning
from ..draw import bar_stack
##########################################################################
## Feature Visualizer
##########################################################################
class FeatureImportances(ModelVisualizer):
"""
Displays the most informative features in a model by showing a bar chart
of features ranked by their importances. Although primarily a feature
engineering mechanism, this visualizer requires a model that has either a
``coef_`` or ``feature_importances_`` parameter after fit.
    Note: Some classification models such as ``LogisticRegression`` return
``coef_`` as a multidimensional array of shape ``(n_classes, n_features)``.
In this case, the ``FeatureImportances`` visualizer computes the mean of the
``coefs_`` by class for each feature.
Parameters
----------
model : Estimator
A Scikit-Learn estimator that learns feature importances. Must support
either ``coef_`` or ``feature_importances_`` parameters.
ax : matplotlib Axes, default: None
        The axis to plot the figure on. If None is passed in, the current axes
will be used (or generated if required).
labels : list, default: None
A list of feature names to use. If a DataFrame is passed to fit and
features is None, feature names are selected as the column names.
relative : bool, default: True
If true, the features are described by their relative importance as a
percentage of the strongest feature component; otherwise the raw
numeric description of the feature importance is shown.
    absolute : bool, default: False
        Make all coefficients absolute to more easily compare negative
        coefficients with positive ones.
    xlabel : str, default: None
        The label for the X-axis. If None, it is automatically determined by
        the underlying model and options provided.
    stack : bool, default: False
        If true and the classifier returns multi-class feature importance,
        then a stacked bar plot is plotted; otherwise the mean of the
        feature importance across classes is plotted.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
features_ : np.array
The feature labels ranked according to their importance
feature_importances_ : np.array
The numeric value of the feature importance computed by the model
classes_ : np.array
        The labeled classes. Not None only for classifiers.
Examples
--------
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> visualizer = FeatureImportances(GradientBoostingClassifier())
>>> visualizer.fit(X, y)
>>> visualizer.poof()
"""
def __init__(self, model, ax=None, labels=None, relative=True,
absolute=False, xlabel=None, stack=False, **kwargs):
super(FeatureImportances, self).__init__(model, ax, **kwargs)
# Data Parameters
self.set_params(
labels=labels, relative=relative, absolute=absolute,
xlabel=xlabel, stack=stack
)
def fit(self, X, y=None, **kwargs):
"""
Fits the estimator to discover the feature importances described by
the data, then draws those importances as a bar plot.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Keyword arguments passed to the fit method of the estimator.
Returns
-------
self : visualizer
The fit method must always return self to support pipelines.
"""
super(FeatureImportances, self).fit(X, y, **kwargs)
# Get the feature importances from the model
self.feature_importances_ = self._find_importances_param()
# Get the classes from the model
if is_classifier(self):
self.classes_ = self._find_classes_param()
else:
self.classes_ = None
self.stack = False
# If self.stack = True and feature importances is a multidim array,
# we're expecting a shape of (n_classes, n_features)
# therefore we flatten by taking the average by
# column to get shape (n_features,) (see LogisticRegression)
if not self.stack and self.feature_importances_.ndim > 1:
self.feature_importances_ = np.mean(self.feature_importances_, axis=0)
warnings.warn((
"detected multi-dimensional feature importances but stack=False, "
"using mean to aggregate them."
), YellowbrickWarning)
# Apply absolute value filter before normalization
if self.absolute:
self.feature_importances_ = np.abs(self.feature_importances_)
# Normalize features relative to the maximum
if self.relative:
maxv = np.abs(self.feature_importances_).max()
self.feature_importances_ /= maxv
self.feature_importances_ *= 100.0
# Create labels for the feature importances
# NOTE: this code is duplicated from MultiFeatureVisualizer
if self.labels is None:
# Use column names if a dataframe
if is_dataframe(X):
self.features_ = np.array(X.columns)
# Otherwise use the column index as the labels
else:
_, ncols = X.shape
self.features_ = np.arange(0, ncols)
else:
self.features_ = np.array(self.labels)
# Sort the features and their importances
if self.stack:
sort_idx = np.argsort(np.mean(self.feature_importances_, 0))
self.features_ = self.features_[sort_idx]
self.feature_importances_ = self.feature_importances_[:, sort_idx]
else:
sort_idx = np.argsort(self.feature_importances_)
self.features_ = self.features_[sort_idx]
self.feature_importances_ = self.feature_importances_[sort_idx]
# Draw the feature importances
self.draw()
return self
def draw(self, **kwargs):
"""
Draws the feature importances as a bar chart; called from fit.
"""
# Quick validation
for param in ('feature_importances_', 'features_'):
if not hasattr(self, param):
raise NotFitted("missing required param '{}'".format(param))
# Find the positions for each bar
pos = np.arange(self.features_.shape[0]) + 0.5
# Plot the bar chart
if self.stack:
legend_kws = {'bbox_to_anchor':(1.04, 0.5), 'loc':"center left"}
bar_stack(self.feature_importances_, ax=self.ax, labels=list(self.classes_),
ticks=self.features_, orientation='h', legend_kws=legend_kws)
else:
self.ax.barh(pos, self.feature_importances_, align='center')
# Set the labels for the bars
self.ax.set_yticks(pos)
self.ax.set_yticklabels(self.features_)
return self.ax
def finalize(self, **kwargs):
"""
Finalize the drawing setting labels and title.
"""
# Set the title
self.set_title('Feature Importances of {} Features using {}'.format(
len(self.features_), self.name))
# Set the xlabel
self.ax.set_xlabel(self._get_xlabel())
# Remove the ygrid
self.ax.grid(False, axis='y')
# Ensure we have a tight fit
plt.tight_layout()
def _find_classes_param(self):
"""
Searches the wrapped model for the classes_ parameter.
"""
for attr in ["classes_"]:
try:
return getattr(self.estimator, attr)
except AttributeError:
continue
raise YellowbrickTypeError(
"could not find classes_ param on {}".format(
self.estimator.__class__.__name__
)
)
def _find_importances_param(self):
"""
Searches the wrapped model for the feature importances parameter.
"""
for attr in ("feature_importances_", "coef_"):
try:
return getattr(self.estimator, attr)
except AttributeError:
continue
raise YellowbrickTypeError(
"could not find feature importances param on {}".format(
self.estimator.__class__.__name__
)
)
def _get_xlabel(self):
"""
Determines the xlabel based on the underlying data structure
"""
# Return user-specified label
if self.xlabel:
return self.xlabel
# Label for coefficients
if hasattr(self.estimator, "coef_"):
if self.relative:
return "relative coefficient magnitude"
return "coefficient value"
# Default label for feature_importances_
if self.relative:
return "relative importance"
return "feature importance"
def _is_fitted(self):
"""
Returns true if the visualizer has been fit.
"""
return hasattr(self, 'feature_importances_') and hasattr(self, 'features_')
##########################################################################
## Quick Method
##########################################################################
def feature_importances(model, X, y=None, ax=None, labels=None,
relative=True, absolute=False, xlabel=None,
stack=False, **kwargs):
"""
Displays the most informative features in a model by showing a bar chart
of features ranked by their importances. Although primarily a feature
engineering mechanism, this visualizer requires a model that has either a
``coef_`` or ``feature_importances_`` parameter after fit.
Parameters
----------
model : Estimator
A Scikit-Learn estimator that learns feature importances. Must support
either ``coef_`` or ``feature_importances_`` parameters.
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n, optional
An array or series of target or class values
ax : matplotlib Axes, default: None
        The axis to plot the figure on. If None is passed in, the current axes
will be used (or generated if required).
labels : list, default: None
A list of feature names to use. If a DataFrame is passed to fit and
features is None, feature names are selected as the column names.
relative : bool, default: True
If true, the features are described by their relative importance as a
percentage of the strongest feature component; otherwise the raw
numeric description of the feature importance is shown.
absolute : bool, default: False
        Make all coefficients absolute to more easily compare negative
        coefficients with positive ones.
    xlabel : str, default: None
        The label for the X-axis. If None, it is automatically determined by
        the underlying model and options provided.
    stack : bool, default: False
        If true and the classifier returns multi-class feature importance,
        then a stacked bar plot is plotted; otherwise the mean of the
        feature importance across classes is plotted.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Returns
-------
ax : matplotlib axes
        Returns the axes that the feature importances were drawn on.
"""
# Instantiate the visualizer
visualizer = FeatureImportances(
model, ax, labels, relative, absolute, xlabel, stack, **kwargs)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y)
visualizer.finalize()
# Return the axes object on the visualizer
return visualizer.ax
| apache-2.0 |
imanmafi/High-Frequency-Trading-Model-with-IB | params/strategy_parameters.py | 7 | 1981 | """
Author: James Ma
Email stuff here: [email protected]
"""
from datetime import datetime
import pandas as pd
import datetime as dt
class StrategyParameters:
def __init__(self, evaluation_time_secs, resample_interval_secs):
self.resample_interval_secs = resample_interval_secs
self.__evaluation_time_secs = evaluation_time_secs
self.__bootstrap_completed = False
self.last_evaluation_time = datetime.now()
self.__COL_BETA = 'beta'
self.__COL_VOLATILITY_RATIO = 'volatility_ratio'
self.indicators = pd.DataFrame(columns=[self.__COL_BETA,
self.__COL_VOLATILITY_RATIO])
def add_indicators(self, beta, volatility_ratio):
timestamp = dt.datetime.now()
self.indicators.loc[timestamp] = [beta, volatility_ratio]
self.indicators.sort_index(inplace=True)
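    # Example (illustrative): each call appends one timestamped row, and the
    # getters below return the most recent values:
    #   params = StrategyParameters(evaluation_time_secs=60, resample_interval_secs=5)
    #   params.add_indicators(beta=0.85, volatility_ratio=1.2)
    #   params.get_beta()              # -> 0.85 (0 before any update)
    #   params.get_volatility_ratio()  # -> 1.2  (1 before any update)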
def trim_indicators_series(self, cutoff_timestamp):
self.indicators = self.indicators[
self.indicators.index >= cutoff_timestamp]
def get_volatility_ratio(self):
return self.__get_latest_indicator_value(self.__COL_VOLATILITY_RATIO,
1)
def get_beta(self):
return self.__get_latest_indicator_value(self.__COL_BETA)
def __get_latest_indicator_value(self, column_name, default_value=0):
if len(self.indicators) > 0:
return self.indicators[column_name].values[-1]
return default_value
def set_bootstrap_completed(self):
self.__bootstrap_completed = True
self.set_new_evaluation_time()
def is_evaluation_time_elapsed(self):
seconds_elapsed = (datetime.now() - self.last_evaluation_time).seconds
return seconds_elapsed > self.__evaluation_time_secs
def set_new_evaluation_time(self):
self.last_evaluation_time = datetime.now()
def is_bootstrap_completed(self):
return self.__bootstrap_completed | mit |
rajat1994/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 85 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
mhallsmoore/qstrader | tests/unit/broker/portfolio/test_position_handler.py | 1 | 4684 | from collections import OrderedDict
import numpy as np
import pandas as pd
import pytz
from qstrader.broker.portfolio.position_handler import PositionHandler
from qstrader.broker.transaction.transaction import Transaction
def test_transact_position_new_position():
"""
Tests the 'transact_position' method for a transaction
with a brand new asset and checks that all objects are
set correctly.
"""
# Create the PositionHandler, Transaction and
# carry out a transaction
ph = PositionHandler()
asset = 'EQ:AMZN'
transaction = Transaction(
asset,
quantity=100,
dt=pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC),
price=960.0,
order_id=123,
commission=26.83
)
ph.transact_position(transaction)
# Check that the position object is set correctly
pos = ph.positions[asset]
assert pos.buy_quantity == 100
assert pos.sell_quantity == 0
assert pos.net_quantity == 100
assert pos.direction == 1
assert pos.avg_price == 960.2683000000001
def test_transact_position_current_position():
"""
Tests the 'transact_position' method for a transaction
with a current asset and checks that all objects are
set correctly.
"""
# Create the PositionHandler, Transaction and
# carry out a transaction
ph = PositionHandler()
asset = 'EQ:AMZN'
dt = pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC)
new_dt = pd.Timestamp('2015-05-06 16:00:00', tz=pytz.UTC)
transaction_long = Transaction(
asset,
quantity=100,
dt=dt,
price=960.0,
order_id=123,
commission=26.83
)
ph.transact_position(transaction_long)
transaction_long_again = Transaction(
asset,
quantity=200,
dt=new_dt,
price=990.0,
order_id=234,
commission=18.53
)
ph.transact_position(transaction_long_again)
# Check that the position object is set correctly
pos = ph.positions[asset]
assert pos.buy_quantity == 300
assert pos.sell_quantity == 0
assert pos.net_quantity == 300
assert pos.direction == 1
assert np.isclose(pos.avg_price, 980.1512)
def test_transact_position_quantity_zero():
"""
Tests the 'transact_position' method for a transaction
with net zero quantity after the transaction to ensure
deletion of the position.
"""
# Create the PositionHandler, Transaction and
# carry out a transaction
ph = PositionHandler()
asset = 'EQ:AMZN'
dt = pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC)
new_dt = pd.Timestamp('2015-05-06 16:00:00', tz=pytz.UTC)
transaction_long = Transaction(
asset,
quantity=100,
dt=dt,
price=960.0,
order_id=123, commission=26.83
)
ph.transact_position(transaction_long)
transaction_close = Transaction(
asset,
quantity=-100,
dt=new_dt,
price=980.0,
order_id=234,
commission=18.53
)
ph.transact_position(transaction_close)
# Go long and then close, then check that the
# positions OrderedDict is empty
assert ph.positions == OrderedDict()
def test_total_values_for_no_transactions():
"""
Tests 'total_market_value', 'total_unrealised_pnl',
'total_realised_pnl' and 'total_pnl' for the case
of no transactions being carried out.
"""
ph = PositionHandler()
assert ph.total_market_value() == 0.0
assert ph.total_unrealised_pnl() == 0.0
assert ph.total_realised_pnl() == 0.0
assert ph.total_pnl() == 0.0
def test_total_values_for_two_separate_transactions():
"""
Tests 'total_market_value', 'total_unrealised_pnl',
'total_realised_pnl' and 'total_pnl' for single
transactions in two separate assets.
"""
ph = PositionHandler()
# Asset 1
asset1 = 'EQ:AMZN'
dt1 = pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC)
trans_pos_1 = Transaction(
asset1,
quantity=75,
dt=dt1,
price=483.45,
order_id=1,
commission=15.97
)
ph.transact_position(trans_pos_1)
# Asset 2
asset2 = 'EQ:MSFT'
dt2 = pd.Timestamp('2015-05-07 15:00:00', tz=pytz.UTC)
trans_pos_2 = Transaction(
asset2,
quantity=250,
dt=dt2,
price=142.58,
order_id=2,
commission=8.35
)
ph.transact_position(trans_pos_2)
# Check all total values
assert ph.total_market_value() == 71903.75
assert np.isclose(ph.total_unrealised_pnl(), -24.31999999999971)
assert ph.total_realised_pnl() == 0.0
assert np.isclose(ph.total_pnl(), -24.31999999999971)
| mit |
f171a9a3497c8b/fractals | fractals/lsys.py | 1 | 7311 | #!/usr/bin/python3
"""FRACTALS: LINDENMAYER SYSTEM"""
import numpy as np
import matplotlib.pyplot as plt
PRECISION = np.float32
def calc_rot_matrix(angle):
"""
Input:
angle
-- integer or float number
-- rotation angle in radians
-- positive number gives counter-clockwise direction of rotation (turns left)
-- negative number gives clockwise direction of rotation (turns right)
Returns 2x2 numpy array of floats, a 2D rotation matrix.
"""
return np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]], dtype=PRECISION)
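# Example (illustrative): a positive quarter-turn maps the x unit vector onto
# the y unit vector (up to floating point error):
#   np.dot(calc_rot_matrix(np.pi / 2), np.array([[1.0], [0.0]]))  # ~ [[0.0], [1.0]]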
def generate_pattern(lvl, states, rewrite_rules):
"""
Inputs:
lvl
-- integer number
-- the number of times (iterations) rewrite rules will be applied
states -- string, the initial state (axiom) of the system
rewrite_rules
-- dictionary
-- keys (character) -> symbols
-- values (string) -> replacement rules
Returns string of symbols.
"""
# In each iteration: check every character in states, replace valid symbol
# with rewrite rule or copy character, and update states
for _ in range(lvl + 1):
states = ''.join([rewrite_rules.get(symbol, symbol) for symbol in states])
    # Clean states from rewrite rule flags/symbols
drawing_rules = 'F+-'
states = ''.join([symbol for symbol in states if symbol in drawing_rules])
return states
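# Example (illustrative): with the Heighway dragon axiom 'FX' and rules
# X -> 'X+YF+' and Y -> '-FX-Y', a single pass (lvl=0) rewrites 'FX' to
# 'FX+YF+'; filtering to the drawing symbols 'F+-' then yields:
#   generate_pattern(0, 'FX', {'X': 'X+YF+', 'Y': '-FX-Y'})  # -> 'F+F+'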
def generate_points(alpha, theta, length, states):
"""
Inputs:
alpha
-- integer or float number
-- angle (in degrees) between the positive x axis
and initial displacement vector
theta
-- integer or float number
-- angle (in degrees) of a single rotation
length
-- integer or float number
-- length of a displacement vector for one step
states -- string of symbols
    Returns numpy array of coordinates of points on a plane.
Notes:
    ** Initial displacement vector starting point is always
in the origin of the coordinate system.
** Only character F in states (alphabet) generates a new point.
"""
# Convert angles from degrees to radians
alpha = np.radians(alpha)
theta = np.radians(theta)
# Displacement vector, 2x1 numpy array
vec = np.array([[np.cos(alpha)], [np.sin(alpha)]], dtype=PRECISION)
vec_len = np.sqrt(vec[0] ** 2 + vec[1] ** 2)
# Rescale displacement vector
vec = vec / vec_len * length
# Rotation matrices for positive and negative angles
rot_left = calc_rot_matrix(theta)
rot_right = calc_rot_matrix(-theta)
# Container to store xy components/coordinates of points on a plane
points = np.zeros(shape=(2, states.count('F') + 1), dtype=PRECISION)
point_index = 1
for st in states:
if st == '+':
vec = np.dot(rot_right, vec)
elif st == '-':
vec = np.dot(rot_left, vec)
else:
points[:, point_index] = points[:, point_index - 1] + vec[:, 0]
point_index += 1
return points
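# Example (illustrative): for states 'F+F' with unit steps, a 90 degree turn
# angle and an initial direction along the x axis, '+' turns clockwise, so the
# generated points are (0, 0), (1, 0) and (1, -1):
#   generate_points(alpha=0, theta=90, length=1, states='F+F')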
def lindemayer(lvl, length, init_angle, angle, init_state,
title='LINDENMAYER FRACTAL', color='#0080FF', **rewrite_rules):
"""
Inputs:
lvl
-- integer number
-- the number of times (iterations) rewrite rules will be applied
length
-- integer or float number
-- length of a displacement vector of each step
init_angle
-- integer or float number
-- initial angle (in degrees) measured from the positive x axis
angle
-- integer or float number
-- angle (in degrees) of a single rotation
-- positive number gives counter-clockwise direction of rotation (turns left)
-- negative number gives clockwise direction of rotation (turns right)
init_state -- string, the initial state (axiom) of the system
title -- string, title of the plot
color -- string, valid matplotlib color
rewrite_rules
-- keyword arguments
-- keys (character) hold flags/symbols
-- values (string) hold rules for production/replacement
    Displays the plot of the calculated sequence of points.
This function does not return any value.
"""
states = generate_pattern(lvl, init_state, rewrite_rules)
points = generate_points(init_angle, angle, length, states)
plt.ioff()
plt.figure(num=title, facecolor='white', frameon=False, clear=True)
plt.style.use('fivethirtyeight')
plt.grid(False)
plt.axis('off')
plt.axis('equal')
plot_options = {
'color': color,
'alpha': 0.5,
'linestyle': '-',
'linewidth': 1.3,
'marker': '',
'antialiased': False,
}
plt.plot(points[0, :], points[1, :], **plot_options)
plt.show()
def heighway_dragon(lvl, length=1, init_angle=0, angle=90):
lindemayer(lvl, length, init_angle, angle, 'FX', 'HEIGHWAY DRAGON',
X='X+YF+', Y='-FX-Y')
def twin_dragon(lvl, length=1, init_angle=0, angle=90):
lindemayer(lvl, length, init_angle, angle, 'FX+FX+', 'TWIN DRAGON',
X='X+YF', Y='FX-Y')
def tetra_dragon(lvl, length=1, init_angle=0, angle=120):
lindemayer(lvl, length, init_angle, angle, 'F', 'TETRA DRAGON',
F='F+F-F')
def levy_dragon(lvl, length=1, init_angle=90, angle=45):
lindemayer(lvl, length, init_angle, angle, 'F', 'LEVY DRAGON',
F='+F--F+')
def koch_snowflake(lvl, length=1, init_angle=0, angle=60):
lindemayer(lvl, length, init_angle, angle, 'F++F++F', 'KOCH SNOWFLAKE',
F='F-F++F-F')
def koch_curve(lvl, length=1, init_angle=0, angle=90):
lindemayer(lvl, length, init_angle, angle, 'F+F+F+F', 'KOCH CURVE',
F='F+F-F-FF+F+F-F')
def sierpinski_triangle(lvl, length=1, init_angle=0, angle=120):
lindemayer(lvl, length, init_angle, angle, 'F+F+F', 'SIERPINSKI TRIANGLE',
F='F+F-F-F+F')
def hilbert_curve(lvl, length=1, init_angle=0, angle=90):
lindemayer(lvl, length, init_angle, angle, 'X', 'HILBERT CURVE',
X='-YF+XFX+FY-', Y='+XF-YFY-FX+')
def moor_curve(lvl, length=1, init_angle=0, angle=90):
lindemayer(lvl, length, init_angle, angle, 'XFX+F+XFX', 'MOOR CURVE',
X='-YF+XFX+FY-', Y='+XF-YFY-FX+')
def peano_curve(lvl, length=1, init_angle=0, angle=90):
lindemayer(lvl, length, init_angle, angle, 'X', 'PEANO CURVE',
X='XFYFX+F+YFXFY-F-XFYFX', Y='YFXFY-F-XFYFX+F+YFXFY')
def tiles(lvl, length=1, init_angle=0, angle=90):
lindemayer(lvl, length, init_angle, angle, 'F+F+F+F', 'TILES',
F='FF+F-F+F+FF')
def pentadendryt(lvl, length=2, init_angle=0, angle=72):
lindemayer(lvl, length, init_angle, angle, 'F', 'PENTADENDRYT',
F='F+F-F--F+F+F')
if __name__ == '__main__':
heighway_dragon(7)
# twin_dragon(10)
# tetra_dragon(6)
# levy_dragon(13)
koch_snowflake(2)
# koch_curve(2)
# sierpinski_triangle(5)
# hilbert_curve(5)
moor_curve(4)
# peano_curve(3)
# tiles(2)
# pentadendryt(4)
| mit |
mozman/ezdxf | tests/test_08_addons/test_814_text2path.py | 1 | 14273 | # Copyright (c) 2021, Manfred Moitzi
# License: MIT License
import pytest
pytest.importorskip("matplotlib") # requires matplotlib!
from matplotlib.font_manager import FontProperties, findfont
from ezdxf.tools.fonts import FontFace
from ezdxf.addons import text2path
from ezdxf.path import Path
from ezdxf import path, bbox
from ezdxf.entities import Text, Hatch
from ezdxf.layouts import VirtualLayout
NOTO_SANS_SC = "Noto Sans SC"
noto_sans_sc_not_found = "Noto" not in findfont(
FontProperties(family=NOTO_SANS_SC)
)
def _to_paths(s, f="Arial"):
return text2path.make_paths_from_str(s, font=FontFace(family=f))
@pytest.mark.parametrize(
"s,c",
[
["1", 1],
["2", 1],
[".", 1],
["0", 2],
["a", 2],
["!", 2],
["@", 2],
["8", 3],
["ü", 3],
["&", 3],
["ä", 4],
["ö", 4],
["%", 5],
],
)
def test_make_paths_from_str(s, c):
assert len(_to_paths(s)) == c
@pytest.mark.skipif(
noto_sans_sc_not_found, reason=f'Font "{NOTO_SANS_SC}" not found'
)
@pytest.mark.parametrize("s,c", [["中", 3], ["国", 4], ["文", 3], ["字", 2]])
def test_chinese_char_paths_from_str(s, c):
assert len(_to_paths(s, f=NOTO_SANS_SC)) == c
def contour_and_holes(group):
return group[0], group[1:]
@pytest.mark.parametrize(
"s,h",
[
["1", 0],
["2", 0],
[".", 0],
["0", 1],
["a", 1],
["8", 2],
],
)
def test_group_one_contour_with_holes(s, h):
paths = _to_paths(s)
result = list(path.group_paths(paths))
contour, holes = contour_and_holes(result[0])
assert isinstance(contour, Path)
assert len(holes) == h
@pytest.mark.parametrize("s", [":", "!", ";", "="])
def test_group_two_contours_without_holes(s):
paths = _to_paths(s)
result = list(path.group_paths(paths))
assert len(result) == 2
contour, holes = contour_and_holes(result[0])
assert isinstance(contour, Path)
assert len(holes) == 0
@pytest.mark.parametrize(
"s",
[
"Ü",
"ö",
"ä",
],
)
def test_group_three_contours_and_ignore_holes(s):
paths = _to_paths(s)
result = list(path.group_paths(paths))
assert len(result) == 3
contour, holes = contour_and_holes(result[0])
assert isinstance(contour, Path)
def test_group_percent_sign():
# Special case %: lower o is inside of the slash bounding box, but HATCH
# creation works as expected!
paths = _to_paths("%")
result = list(path.group_paths(paths))
assert len(result) == 2
contour, holes = contour_and_holes(result[0])
assert isinstance(contour, Path)
assert len(holes) == 2
@pytest.mark.skipif(
noto_sans_sc_not_found, reason='Font "Noto Sans SC" not found'
)
@pytest.mark.parametrize("s,c", [["中", 1], ["国", 1], ["文", 2], ["字", 2]])
def test_group_chinese_chars_and_ignore_holes(s, c):
paths = _to_paths(s, f=NOTO_SANS_SC)
result = list(path.group_paths(paths))
assert len(result) == c
contour, holes = contour_and_holes(result[0])
assert isinstance(contour, Path)
@pytest.fixture(scope="module")
def ff():
return FontFace(family="Arial")
class TestMakePathFromString:
# Surprise - even 0 and negative values work without any exceptions!
@pytest.mark.parametrize("size", [0, 0.05, 1, 2, 100, -1, -2, -100])
def test_text_path_height_for_exact_drawing_units(self, size, ff):
paths = text2path.make_paths_from_str("X", font=ff, size=size)
bbox = path.bbox(paths)
assert bbox.size.y == pytest.approx(abs(size))
@pytest.mark.parametrize("size", [0.05, 1, 2, 100])
def test_path_coordinates_for_positive_size(self, size, ff):
paths = text2path.make_paths_from_str("X", font=ff, size=size)
bbox = path.bbox(paths)
assert bbox.extmax.y == pytest.approx(size)
assert bbox.extmin.y == pytest.approx(0)
@pytest.mark.parametrize("size", [-0.05, -1, -2, -100])
def test_path_coordinates_for_negative_size(self, size, ff):
# Negative text height mirrors text about the x-axis!
paths = text2path.make_paths_from_str("X", font=ff, size=size)
bbox = path.bbox(paths)
assert bbox.extmax.y == pytest.approx(0)
assert bbox.extmin.y == pytest.approx(size)
@pytest.mark.parametrize("size", [0.05, 1, 2, 100])
def test_length_for_fit_alignment(self, size, ff):
length = 3
paths = text2path.make_paths_from_str(
"XXX", font=ff, size=size, align="FIT", length=length
)
bbox = path.bbox(paths)
assert bbox.size.x == pytest.approx(length), "expect exact length"
assert bbox.size.y == pytest.approx(
size
), "text height should be unscaled"
@pytest.mark.parametrize("size", [0.05, 1, 2, 100])
def test_scaled_height_and_length_for_aligned_text(self, size, ff):
length = 3
paths = text2path.make_paths_from_str(
"XXX", font=ff, size=size, align="LEFT"
)
default = path.bbox(paths)
paths = text2path.make_paths_from_str(
"XXX", font=ff, size=size, align="ALIGNED", length=length
)
bbox = path.bbox(paths)
scale = bbox.size.x / default.size.x
assert bbox.size.x == pytest.approx(length), "expect exact length"
assert bbox.size.y == pytest.approx(
size * scale
), "text height should be scaled"
def test_paths_from_empty_string(self, ff):
paths = text2path.make_paths_from_str("", font=ff)
assert len(paths) == 0
def test_make_multi_path_object(self, ff):
p = text2path.make_path_from_str("ABC", font=ff)
assert p.has_sub_paths is True
assert len(list(p.sub_paths())) == 6
def test_make_empty_multi_path_object(self, ff):
p = text2path.make_path_from_str("", font=ff)
assert p.has_sub_paths is False
assert len(p) == 0
class TestMakeHatchesFromString:
def test_hatches_from_empty_string(self, ff):
hatches = text2path.make_hatches_from_str("", font=ff)
assert len(hatches) == 0
def test_make_exterior_only_hatches(self, ff):
hatches = text2path.make_hatches_from_str("XXX", font=ff)
assert len(hatches) == 3
assert len(hatches[0].paths) == 1
def test_make_hatches_with_holes(self, ff):
hatches = text2path.make_hatches_from_str("AAA", font=ff)
assert len(hatches) == 3
assert len(hatches[0].paths) == 2, "expected external and one hole"
def test_total_length_for_fit_alignment(self, ff):
length = 3
hatches = text2path.make_hatches_from_str(
"XXX", font=ff, align="FIT", length=length
)
paths = []
for hatch in hatches:
paths.extend(path.from_hatch(hatch))
bbox = path.bbox(paths)
assert bbox.size.x == pytest.approx(length), "expect exact length"
assert bbox.size.y == pytest.approx(
1.0
), "text height should be unscaled"
def test_check_entity_type():
with pytest.raises(TypeError):
text2path.check_entity_type(None)
with pytest.raises(TypeError):
text2path.check_entity_type(Hatch())
def make_text(text, location, alignment, height=1.0, rotation=0):
text = Text.new(
dxfattribs={
"text": text,
"height": height,
"rotation": rotation,
}
)
text.set_pos(location, align=alignment)
return text
def get_path_bbox(text):
p = text2path.make_path_from_entity(text)
return path.bbox([p], flatten=0)
def get_paths_bbox(text):
paths = text2path.make_paths_from_entity(text)
return path.bbox(paths, flatten=0)
def get_hatches_bbox(text):
hatches = text2path.make_hatches_from_entity(text)
return bbox.extents(hatches, flatten=0)
@pytest.fixture(params=[get_path_bbox, get_paths_bbox, get_hatches_bbox])
def get_bbox(request):
return request.param
class TestMakePathsFromEntity:
"""Test Paths (and Hatches) from TEXT entities.
make_hatches_from_entity() is basically make_paths_from_entity(), but
returns Hatch entities instead of Path objects.
Important: Don't use text with top or bottom curves for testing ("S", "O").
The Path bounding box calculation uses the "fast" method by checking only
the curve control points, which are outside the curve borders.
"""
@pytest.mark.parametrize(
"builder, type_",
[
(text2path.make_paths_from_entity, Path),
(text2path.make_hatches_from_entity, Hatch),
],
)
def test_text_returns_correct_types(self, builder, type_):
text = make_text("TEXT", (0, 0), "LEFT")
objects = builder(text)
assert len(objects) == 4
assert isinstance(objects[0], type_)
def test_text_height(self, get_bbox):
text = make_text("TEXT", (0, 0), "LEFT", height=1.5)
bbox = get_bbox(text)
assert bbox.size.y == pytest.approx(1.5)
def test_alignment_left(self, get_bbox):
text = make_text("TEXT", (7, 7), "LEFT")
bbox = get_bbox(text)
# font rendering is tricky, base offsets depend on the rendering engine
# and on extended font metrics, ...
assert bbox.extmin.x == pytest.approx(7, abs=0.1)
def test_alignment_center(self, get_bbox):
text = make_text("TEXT", (7, 7), "CENTER")
bbox = get_bbox(text)
assert bbox.center.x == pytest.approx(7)
def test_alignment_right(self, get_bbox):
text = make_text("TEXT", (7, 7), "RIGHT")
bbox = get_bbox(text)
assert bbox.extmax.x == pytest.approx(7)
def test_alignment_baseline(self, get_bbox):
text = make_text("TEXT", (7, 7), "CENTER")
bbox = get_bbox(text)
assert bbox.extmin.y == pytest.approx(7)
def test_alignment_bottom(self, get_bbox):
text = make_text("j", (7, 7), "BOTTOM_CENTER")
bbox = get_bbox(text)
# bottom border of descender should be 7, but ...
assert bbox.extmin.y == pytest.approx(7, abs=0.1)
def test_alignment_middle(self, get_bbox):
text = make_text("X", (7, 7), "MIDDLE_CENTER")
bbox = get_bbox(text)
assert bbox.center.y == pytest.approx(7)
def test_alignment_top(self, get_bbox):
text = make_text("X", (7, 7), "TOP_CENTER")
bbox = get_bbox(text)
assert bbox.extmax.y == pytest.approx(7)
def test_alignment_fit(self, get_bbox):
length = 2
height = 1
text = make_text("TEXT", (0, 0), "LEFT", height=height)
text.set_pos((1, 0), (1 + length, 0), "FIT")
bbox = get_bbox(text)
assert (
bbox.size.x == length
), "expected text length fits into given length"
assert bbox.size.y == height, "expected unscaled text height"
assert bbox.extmin.isclose((1, 0))
def test_alignment_aligned(self, get_bbox):
length = 2
height = 1
text = make_text("TEXT", (0, 0), "CENTER", height=height)
bbox = get_bbox(text)
ratio = bbox.size.x / bbox.size.y
text.set_pos((1, 0), (1 + length, 0), "ALIGNED")
bbox = get_bbox(text)
assert (
bbox.size.x == length
), "expected text length fits into given length"
assert bbox.size.y != height, "expected scaled text height"
assert bbox.extmin.isclose((1, 0))
assert bbox.size.x / bbox.size.y == pytest.approx(
ratio
), "expected same width/height ratio"
def test_rotation_90(self, get_bbox):
# Horizontal reference measurements:
bbox_hor = get_bbox(make_text("TEXT", (7, 7), "MIDDLE_CENTER"))
text_vert = make_text("TEXT", (7, 7), "MIDDLE_CENTER", rotation=90)
bbox_vert = get_bbox(text_vert)
assert bbox_hor.center == bbox_vert.center
assert bbox_hor.size.x == bbox_vert.size.y
assert bbox_hor.size.y == bbox_vert.size.x
Kind = text2path.Kind
class TestVirtualEntities:
@pytest.fixture
def text(self):
return make_text("TEST", (0, 0), "LEFT")
def test_virtual_entities_as_hatches(self, text):
entities = text2path.virtual_entities(text, kind=Kind.HATCHES)
types = {e.dxftype() for e in entities}
assert types == {"HATCH"}
def test_virtual_entities_as_splines_and_polylines(self, text):
entities = text2path.virtual_entities(text, kind=Kind.SPLINES)
types = {e.dxftype() for e in entities}
assert types == {"SPLINE", "POLYLINE"}
def test_virtual_entities_as_lwpolylines(self, text):
entities = text2path.virtual_entities(text, kind=Kind.LWPOLYLINES)
types = {e.dxftype() for e in entities}
assert types == {"LWPOLYLINE"}
def test_virtual_entities_to_all_types_at_once(self, text):
entities = text2path.virtual_entities(
text, kind=Kind.HATCHES + Kind.SPLINES + Kind.LWPOLYLINES
)
types = {e.dxftype() for e in entities}
assert types == {"LWPOLYLINE", "SPLINE", "POLYLINE", "HATCH"}
class TestExplode:
"""Based on text2path.virtual_entities() function, see test above."""
@pytest.fixture
def text(self):
return make_text("TEST", (0, 0), "LEFT")
def test_source_entity_is_destroyed(self, text):
assert text.is_alive is True
text2path.explode(text, kind=4)
assert (
text.is_alive is False
), "source entity should always be destroyed"
def test_explode_entity_into_layout(self, text):
layout = VirtualLayout()
entities = text2path.explode(text, kind=Kind.LWPOLYLINES, target=layout)
assert len(entities) == len(
layout
), "expected all entities added to the target layout"
def test_explode_entity_into_the_void(self, text):
assert (
text.get_layout() is None
), "source entity should not have a layout"
entities = text2path.explode(text, kind=Kind.LWPOLYLINES, target=None)
assert len(entities) == 4, "explode should work without a target layout"
if __name__ == "__main__":
pytest.main([__file__])
| mit |
andaag/scikit-learn | sklearn/calibration.py | 137 | 18876 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
    for prediction. In case that cv="prefit" is passed to __init__,
    it is assumed that base_estimator has been fitted already and all
    data is used for calibration. Note that data for fitting the
    classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples (<<1000) since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer or cross-validation generator or "prefit", optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
        The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
        # Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, X, y, classifier=True)
arg_names = inspect.getargspec(base_estimator.fit)[0]
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in arg_names):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv:
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
        # classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
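# Example (illustrative) of the cv="prefit" workflow described above; the
# arrays X_train, y_train, X_calib, y_calib and X_test are placeholders for a
# disjoint train/calibration/test split:
#   svc = LinearSVC(random_state=0).fit(X_train, y_train)
#   calibrated = CalibratedClassifierCV(svc, method="sigmoid", cv="prefit")
#   calibrated.fit(X_calib, y_calib)
#   probas = calibrated.predict_proba(X_test)  # shape (n_samples, n_classes)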
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
    tiny = np.finfo(np.float64).tiny  # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
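# Illustrative sketch, not part of the original module: how the Platt
# parameters (a, b) returned by _sigmoid_calibration map raw decision values
# onto calibrated probabilities. The synthetic scores below are assumptions
# used only for demonstration.
def _demo_sigmoid_calibration_parameters():
    import numpy as np
    rng = np.random.RandomState(0)
    scores = np.concatenate([rng.normal(-2., 1., 100), rng.normal(2., 1., 100)])
    labels = np.concatenate([np.zeros(100), np.ones(100)])
    a, b = _sigmoid_calibration(scores, labels)
    # The calibrated probability of the positive class for a raw score s is
    # 1 / (1 + exp(a * s + b)); a is expected to come out negative here so
    # that larger scores map to larger probabilities.
    return 1. / (1. + np.exp(a * scores + b))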
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
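# Illustrative sketch, not part of the original module: fitting the private
# _SigmoidCalibration helper on synthetic decision values and mapping a grid
# of scores to probabilities. All inputs are assumptions for demonstration.
def _demo_sigmoid_calibration_estimator():
    import numpy as np
    rng = np.random.RandomState(1)
    scores = np.concatenate([rng.normal(-1., 1., 50), rng.normal(1., 1., 50)])
    labels = np.concatenate([np.zeros(50), np.ones(50)])
    calibrator = _SigmoidCalibration().fit(scores, labels)
    # Predicted probabilities increase monotonically with the raw score.
    return calibrator.predict(np.linspace(-3., 3., 7))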
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the interval [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
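# Illustrative sketch, not part of the original module: building a small
# reliability diagram with calibration_curve. The toy labels and predicted
# probabilities are assumptions chosen only to make the call shape obvious.
def _demo_calibration_curve():
    import numpy as np
    y_true = np.array([0, 0, 0, 1, 1, 1, 1, 1])
    y_prob = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9])
    # One (fraction_of_positives, mean_predicted_value) pair per non-empty
    # bin; a perfectly calibrated model would put these points on the diagonal.
    prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=3)
    return prob_true, prob_pred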
| bsd-3-clause |
larsoner/mne-python | mne/decoding/tests/test_base.py | 12 | 15702 | # Author: Jean-Remi King, <[email protected]>
# Marijn van Vliet, <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal, assert_allclose, assert_array_less)
import pytest
from mne import create_info, EpochsArray
from mne.fixes import is_regressor, is_classifier
from mne.utils import requires_sklearn, requires_version
from mne.decoding.base import (_get_inverse_funcs, LinearModel, get_coef,
cross_val_multiscore, BaseEstimator)
from mne.decoding.search_light import SlidingEstimator
from mne.decoding import (Scaler, TransformerMixin, Vectorizer,
GeneralizingEstimator)
def _make_data(n_samples=1000, n_features=5, n_targets=3):
"""Generate some testing data.
Parameters
----------
n_samples : int
The number of samples.
n_features : int
The number of features.
n_targets : int
The number of targets.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The measured data.
Y : ndarray, shape (n_samples, n_targets)
The latent variables generating the data.
A : ndarray, shape (n_features, n_targets)
The forward model, mapping the latent variables (=Y) to the measured
data (=X).
"""
# Define Y latent factors
np.random.seed(0)
cov_Y = np.eye(n_targets) * 10 + np.random.rand(n_targets, n_targets)
cov_Y = (cov_Y + cov_Y.T) / 2.
mean_Y = np.random.rand(n_targets)
Y = np.random.multivariate_normal(mean_Y, cov_Y, size=n_samples)
# The Forward model
A = np.random.randn(n_features, n_targets)
X = Y.dot(A.T)
X += np.random.randn(n_samples, n_features) # add noise
X += np.random.rand(n_features) # Put an offset
return X, Y, A
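# Illustrative sketch, not one of the original tests: the shapes returned by
# _make_data, matching the docstring above. The sizes are arbitrary
# assumptions used only for demonstration.
def _demo_make_data_shapes():
    X, Y, A = _make_data(n_samples=100, n_features=5, n_targets=3)
    assert X.shape == (100, 5)   # measured data
    assert Y.shape == (100, 3)   # latent targets
    assert A.shape == (5, 3)     # forward model mapping Y to X
    return X, Y, A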
@requires_sklearn
def test_get_coef():
"""Test getting linear coefficients (filters/patterns) from estimators."""
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV
lm_classification = LinearModel()
assert (is_classifier(lm_classification))
lm_regression = LinearModel(Ridge())
assert (is_regressor(lm_regression))
parameters = {'kernel': ['linear'], 'C': [1, 10]}
lm_gs_classification = LinearModel(
GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=1))
assert (is_classifier(lm_gs_classification))
lm_gs_regression = LinearModel(
GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=1))
assert (is_regressor(lm_gs_regression))
    # Define a classifier, an invertible transformer and a non-invertible one.
class Clf(BaseEstimator):
def fit(self, X, y):
return self
class NoInv(TransformerMixin):
def fit(self, X, y):
return self
def transform(self, X):
return X
class Inv(NoInv):
def inverse_transform(self, X):
return X
X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1)
# I. Test inverse function
# Check that we retrieve the right number of inverse functions even if
# there are nested pipelines
good_estimators = [
(1, make_pipeline(Inv(), Clf())),
(2, make_pipeline(Inv(), Inv(), Clf())),
(3, make_pipeline(Inv(), make_pipeline(Inv(), Inv()), Clf())),
]
for expected_n, est in good_estimators:
est.fit(X, y)
assert (expected_n == len(_get_inverse_funcs(est)))
bad_estimators = [
Clf(), # no preprocessing
Inv(), # final estimator isn't classifier
make_pipeline(NoInv(), Clf()), # first step isn't invertible
make_pipeline(Inv(), make_pipeline(
Inv(), NoInv()), Clf()), # nested step isn't invertible
]
for est in bad_estimators:
est.fit(X, y)
invs = _get_inverse_funcs(est)
assert_equal(invs, list())
# II. Test get coef for classification/regression estimators and pipelines
rng = np.random.RandomState(0)
for clf in (lm_regression,
lm_gs_classification,
make_pipeline(StandardScaler(), lm_classification),
make_pipeline(StandardScaler(), lm_gs_regression)):
# generate some categorical/continuous data
# according to the type of estimator.
if is_classifier(clf):
n, n_features = 1000, 3
X = rng.rand(n, n_features)
y = np.arange(n) % 2
else:
X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1)
y = np.ravel(y)
clf.fit(X, y)
# Retrieve final linear model
filters = get_coef(clf, 'filters_', False)
if hasattr(clf, 'steps'):
if hasattr(clf.steps[-1][-1].model, 'best_estimator_'):
# Linear Model with GridSearchCV
coefs = clf.steps[-1][-1].model.best_estimator_.coef_
else:
# Standard Linear Model
coefs = clf.steps[-1][-1].model.coef_
else:
if hasattr(clf.model, 'best_estimator_'):
# Linear Model with GridSearchCV
coefs = clf.model.best_estimator_.coef_
else:
# Standard Linear Model
coefs = clf.model.coef_
if coefs.ndim == 2 and coefs.shape[0] == 1:
coefs = coefs[0]
assert_array_equal(filters, coefs)
patterns = get_coef(clf, 'patterns_', False)
assert (filters[0] != patterns[0])
n_chans = X.shape[1]
assert_array_equal(filters.shape, patterns.shape, [n_chans, n_chans])
# Inverse transform linear model
filters_inv = get_coef(clf, 'filters_', True)
assert (filters[0] != filters_inv[0])
patterns_inv = get_coef(clf, 'patterns_', True)
assert (patterns[0] != patterns_inv[0])
class _Noop(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
return X.copy()
inverse_transform = transform
@requires_sklearn
@pytest.mark.parametrize('inverse', (True, False))
@pytest.mark.parametrize('Scale, kwargs', [
(Scaler, dict(info=None, scalings='mean')),
(_Noop, dict()),
])
def test_get_coef_inverse_transform(inverse, Scale, kwargs):
"""Test get_coef with and without inverse_transform."""
from sklearn.linear_model import Ridge
from sklearn.pipeline import make_pipeline
lm_regression = LinearModel(Ridge())
X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1)
# Check with search_light and combination of preprocessing ending with sl:
# slider = SlidingEstimator(make_pipeline(StandardScaler(), lm_regression))
# XXX : line above should work but does not as only last step is
# used in get_coef ...
slider = SlidingEstimator(make_pipeline(lm_regression))
X = np.transpose([X, -X], [1, 2, 0]) # invert X across 2 time samples
clf = make_pipeline(Scale(**kwargs), slider)
clf.fit(X, y)
patterns = get_coef(clf, 'patterns_', inverse)
filters = get_coef(clf, 'filters_', inverse)
assert_array_equal(filters.shape, patterns.shape, X.shape[1:])
# the two time samples get inverted patterns
assert_equal(patterns[0, 0], -patterns[0, 1])
for t in [0, 1]:
filters_t = get_coef(
clf.named_steps['slidingestimator'].estimators_[t],
'filters_', False)
if Scale is _Noop:
assert_array_equal(filters_t, filters[:, t])
@requires_sklearn
@pytest.mark.parametrize('n_features', [1, 5])
@pytest.mark.parametrize('n_targets', [1, 3])
def test_get_coef_multiclass(n_features, n_targets):
"""Test get_coef on multiclass problems."""
# Check patterns with more than 1 regressor
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.pipeline import make_pipeline
X, Y, A = _make_data(
n_samples=30000, n_features=n_features, n_targets=n_targets)
lm = LinearModel(LinearRegression()).fit(X, Y)
assert_array_equal(lm.filters_.shape, lm.patterns_.shape)
if n_targets == 1:
want_shape = (n_features,)
else:
want_shape = (n_targets, n_features)
assert_array_equal(lm.filters_.shape, want_shape)
if n_features > 1 and n_targets > 1:
assert_array_almost_equal(A, lm.patterns_.T, decimal=2)
lm = LinearModel(Ridge(alpha=0))
clf = make_pipeline(lm)
clf.fit(X, Y)
if n_features > 1 and n_targets > 1:
assert_allclose(A, lm.patterns_.T, atol=2e-2)
coef = get_coef(clf, 'patterns_', inverse_transform=True)
assert_allclose(lm.patterns_, coef, atol=1e-5)
# With epochs, scaler, and vectorizer (typical use case)
X_epo = X.reshape(X.shape + (1,))
info = create_info(n_features, 1000., 'eeg')
lm = LinearModel(Ridge(alpha=1))
clf = make_pipeline(
Scaler(info, scalings=dict(eeg=1.)), # XXX adding this step breaks
Vectorizer(),
lm,
)
clf.fit(X_epo, Y)
if n_features > 1 and n_targets > 1:
assert_allclose(A, lm.patterns_.T, atol=2e-2)
coef = get_coef(clf, 'patterns_', inverse_transform=True)
lm_patterns_ = lm.patterns_[..., np.newaxis]
assert_allclose(lm_patterns_, coef, atol=1e-5)
# Check can pass fitting parameters
lm.fit(X, Y, sample_weight=np.ones(len(Y)))
@requires_version('sklearn', '0.22') # roc_auc_ovr_weighted
@pytest.mark.parametrize('n_classes, n_channels, n_times', [
(4, 10, 2),
(4, 3, 2),
(3, 2, 1),
(3, 1, 2),
])
def test_get_coef_multiclass_full(n_classes, n_channels, n_times):
"""Test a full example with pattern extraction."""
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
data = np.zeros((10 * n_classes, n_channels, n_times))
# Make only the first channel informative
for ii in range(n_classes):
data[ii * 10:(ii + 1) * 10, 0] = ii
events = np.zeros((len(data), 3), int)
events[:, 0] = np.arange(len(events))
events[:, 2] = data[:, 0, 0]
info = create_info(n_channels, 1000., 'eeg')
epochs = EpochsArray(data, info, events, tmin=0)
clf = make_pipeline(
Scaler(epochs.info), Vectorizer(),
LinearModel(LogisticRegression(random_state=0, multi_class='ovr')),
)
scorer = 'roc_auc_ovr_weighted'
time_gen = GeneralizingEstimator(clf, scorer, verbose=True)
X = epochs.get_data()
y = epochs.events[:, 2]
n_splits = 3
cv = StratifiedKFold(n_splits=n_splits)
scores = cross_val_multiscore(time_gen, X, y, cv=cv, verbose=True)
want = (n_splits,)
if n_times > 1:
want += (n_times, n_times)
assert scores.shape == want
assert_array_less(0.8, scores)
clf.fit(X, y)
patterns = get_coef(clf, 'patterns_', inverse_transform=True)
assert patterns.shape == (n_classes, n_channels, n_times)
assert_allclose(patterns[:, 1:], 0., atol=1e-7) # no other channels useful
@requires_sklearn
def test_linearmodel():
"""Test LinearModel class for computing filters and patterns."""
# check categorical target fit in standard linear model
from sklearn.linear_model import LinearRegression
rng = np.random.RandomState(0)
clf = LinearModel()
n, n_features = 20, 3
X = rng.rand(n, n_features)
y = np.arange(n) % 2
clf.fit(X, y)
assert_equal(clf.filters_.shape, (n_features,))
assert_equal(clf.patterns_.shape, (n_features,))
with pytest.raises(ValueError):
wrong_X = rng.rand(n, n_features, 99)
clf.fit(wrong_X, y)
# check categorical target fit in standard linear model with GridSearchCV
from sklearn import svm
from sklearn.model_selection import GridSearchCV
parameters = {'kernel': ['linear'], 'C': [1, 10]}
clf = LinearModel(
GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=1))
clf.fit(X, y)
assert_equal(clf.filters_.shape, (n_features,))
assert_equal(clf.patterns_.shape, (n_features,))
with pytest.raises(ValueError):
wrong_X = rng.rand(n, n_features, 99)
clf.fit(wrong_X, y)
# check continuous target fit in standard linear model with GridSearchCV
n_targets = 1
Y = rng.rand(n, n_targets)
clf = LinearModel(
GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=1))
clf.fit(X, y)
assert_equal(clf.filters_.shape, (n_features, ))
assert_equal(clf.patterns_.shape, (n_features, ))
with pytest.raises(ValueError):
wrong_y = rng.rand(n, n_features, 99)
clf.fit(X, wrong_y)
# check multi-target fit in standard linear model
n_targets = 5
Y = rng.rand(n, n_targets)
clf = LinearModel(LinearRegression())
clf.fit(X, Y)
assert_equal(clf.filters_.shape, (n_targets, n_features))
assert_equal(clf.patterns_.shape, (n_targets, n_features))
with pytest.raises(ValueError):
wrong_y = rng.rand(n, n_features, 99)
clf.fit(X, wrong_y)
@requires_sklearn
def test_cross_val_multiscore():
"""Test cross_val_multiscore for computing scores on decoding over time."""
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score
from sklearn.linear_model import LogisticRegression, LinearRegression
logreg = LogisticRegression(solver='liblinear', random_state=0)
# compare to cross-val-score
X = np.random.rand(20, 3)
y = np.arange(20) % 2
cv = KFold(2, random_state=0, shuffle=True)
clf = logreg
assert_array_equal(cross_val_score(clf, X, y, cv=cv),
cross_val_multiscore(clf, X, y, cv=cv))
# Test with search light
X = np.random.rand(20, 4, 3)
y = np.arange(20) % 2
clf = SlidingEstimator(logreg, scoring='accuracy')
scores_acc = cross_val_multiscore(clf, X, y, cv=cv)
assert_array_equal(np.shape(scores_acc), [2, 3])
# check values
scores_acc_manual = list()
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
scores_acc_manual.append(clf.score(X[test], y[test]))
assert_array_equal(scores_acc, scores_acc_manual)
# check scoring metric
# raise an error if scoring is defined at cross-val-score level and
# search light, because search light does not return a 1-dimensional
# prediction.
pytest.raises(ValueError, cross_val_multiscore, clf, X, y, cv=cv,
scoring='roc_auc')
clf = SlidingEstimator(logreg, scoring='roc_auc')
scores_auc = cross_val_multiscore(clf, X, y, cv=cv, n_jobs=1)
scores_auc_manual = list()
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
scores_auc_manual.append(clf.score(X[test], y[test]))
assert_array_equal(scores_auc, scores_auc_manual)
# indirectly test that cross_val_multiscore rightly detects the type of
    # estimator and generates a StratifiedKFold for classifiers and a KFold
# otherwise
X = np.random.randn(1000, 3)
y = np.ones(1000, dtype=int)
y[::2] = 0
clf = logreg
reg = LinearRegression()
for cross_val in (cross_val_score, cross_val_multiscore):
manual = cross_val(clf, X, y, cv=StratifiedKFold(2))
auto = cross_val(clf, X, y, cv=2)
assert_array_equal(manual, auto)
manual = cross_val(reg, X, y, cv=KFold(2))
auto = cross_val(reg, X, y, cv=2)
assert_array_equal(manual, auto)
| bsd-3-clause |
rohanp/scikit-learn | sklearn/tree/tests/test_export.py | 31 | 9588 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=2,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=2,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[3.0, 1.0, 0.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n' \
'[3, 0, 0]]", fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n' \
'[0.0, 1.0, 0.5]]", fillcolor="#e5813986"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n' \
'[0, 1, 0]]", fillcolor="#e58139ff"] ;\n' \
'2 -> 3 ;\n' \
'4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'2 -> 4 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=2,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e5813980"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
    for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
| bsd-3-clause |
aarondewindt/paparazzi_torrap | sw/tools/calibration/calib_mag_live.py | 13 | 6700 | #! /usr/bin/env python
from __future__ import print_function, division
import time
import logging
import sys
from os import path, getenv
# if PAPARAZZI_SRC not set, then assume the tree containing this
# file is a reasonable substitute
PPRZ_SRC = getenv("PAPARAZZI_SRC", path.normpath(path.join(path.dirname(path.abspath(__file__)), '../../../')))
sys.path.append(PPRZ_SRC + "/sw/lib/python")
sys.path.append(PPRZ_SRC + "/sw/ext/pprzlink/lib/v1.0/python")
PPRZ_HOME = getenv("PAPARAZZI_HOME", PPRZ_SRC)
from pprzlink.ivy import IvyMessagesInterface
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
import scipy
from scipy import optimize
import calibration_utils
class MagPlot(object):
def __init__(self):
# Setup the figure and axes...
self.fig = plt.figure()
#self.ax = self.fig.add_subplot(1, 1, 1, projection='3d')
self.ax = p3.Axes3D(self.fig)
self.ax.set_aspect('equal')
self.ax.set_xlabel('X')
self.ax.set_ylabel('Y')
self.ax.set_zlabel('Z')
self.set_ax_lim(10)
self.ax.set_title("Raw Mag data")
self.data = np.zeros((1, 3))
self.max_lim = 1
# Then setup FuncAnimation.
self.ani = animation.FuncAnimation(self.fig, self.update, interval=100, init_func=self.setup_plot, blit=False)
def set_ax_lim(self, lim):
lim = [-lim, lim]
self.ax.set_xlim3d(lim)
self.ax.set_ylim3d(lim)
self.ax.set_zlim3d(lim)
def setup_plot(self):
x = self.data[:, 0]
y = self.data[:, 1]
z = self.data[:, 2]
self.scat = self.ax.scatter(x, y, z, alpha=1)
# For FuncAnimation's sake, we need to return the artist we'll be using
# Note that it expects a sequence of artists, thus the trailing comma.
return self.scat,
def show(self, block=True):
plt.show(block=block)
def update(self, i):
logging.debug("updating scatter: %d with %s" % (i, len(self.data)))
self.scat.set_offsets(self.data[:, 0:2])
self.scat.set_3d_properties(self.data[:, 2], 'z')
# We need to return the updated artist for FuncAnimation to draw..
# Note that it expects a sequence of artists, thus the trailing comma.
return self.scat,
def add_data(self, data):
logging.debug("adding data %s" % data)
if len(self.data) == 1 and not np.any(self.data):
self.data[0] = np.array(data)
else:
self.data = np.vstack((self.data, np.array(data)))
max_lim = np.max(np.abs(data))
if max_lim > self.max_lim:
self.max_lim = max_lim
self.set_ax_lim(max_lim)
class MagCalibrator(object):
def __init__(self, plot_results=True, verbose=False):
self._interface = IvyMessagesInterface("calib_mag")
self.plotter = MagPlot()
self.data = []
self.flt_meas = []
self.p0 = np.array([0, 0, 0, 0, 0, 0])
self.optimization_done = False
self.plot_results = plot_results
def start_collect(self):
self._interface.subscribe(self.message_recv, "(.*IMU_MAG_RAW.*)")
def stop_collect(self):
self._interface.unsubscribe_all()
def message_recv(self, ac_id, msg):
self.data.append(np.array([int(v) for v in msg.fieldvalues]))
if self.plot_results:
            self.plotter.add_data([int(v) for v in msg.fieldvalues])
def shutdown(self):
if self._interface is not None:
print("Shutting down ivy interface...")
self._interface.shutdown()
self._interface = None
def __del__(self):
self.shutdown()
def calc_min_max_guess(self):
if len(self.data) > 3:
# filter out noisy measurements?
self.flt_meas = np.array(self.data)
self.p0 = calibration_utils.get_min_max_guess(self.flt_meas, 1.0)
def print_min_max_guess(self):
self.calc_min_max_guess()
if self.data:
print("Current guess from %d measurements: neutral [%d, %d, %d], scale [%.3f, %.3f, %.3f]" % (len(self.flt_meas),
int(round(self.p0[0])), int(round(self.p0[1])), int(round(self.p0[2])),
self.p0[3]*2**11, self.p0[4]*2**11, self.p0[5]*2**11))
def calibrate(self):
self.calc_min_max_guess()
if len(self.flt_meas) < 10:
logging.warning("Not enough measurements")
return
cp0, np0 = calibration_utils.scale_measurements(self.flt_meas, self.p0)
logging.info("initial guess : avg "+str(np0.mean())+" std "+str(np0.std()))
calibration_utils.print_xml(self.p0, "MAG", 11)
        def err_func(p, meas, y):
            # avoid shadowing the module-level numpy import with the local name
            cp, norms = calibration_utils.scale_measurements(meas, p)
            err = y * np.ones(len(meas)) - norms
            return err
p1, cov, info, msg, success = optimize.leastsq(err_func, self.p0[:], args=(self.flt_meas, 1.0), full_output=1)
self.optimization_done = success in [1, 2, 3, 4]
if not self.optimization_done:
logging.warning("Optimization error: ", msg)
cp1, np1 = calibration_utils.scale_measurements(self.flt_meas, p1)
if self.optimization_done:
logging.info("optimized guess : avg " + str(np1.mean()) + " std " + str(np1.std()))
calibration_utils.print_xml(p1, "MAG", 11)
else:
logging.info("last iteration of failed optimized guess : avg " + str(np1.mean()) + " std " + str(np1.std()))
if self.plot_results:
calibration_utils.plot_results("MAG", np.array(self.data), range(len(self.data)),
self.flt_meas, cp0, np0, cp1, np1, 1.0, blocking=False)
calibration_utils.plot_mag_3d(self.flt_meas, cp1, p1)
if __name__ == '__main__':
import argparse
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--plot', action='store_true', help='Interactive plot')
args = parser.parse_args()
if args.plot:
print("Close the interactive plot window to run the final calibration.")
else:
print("Press CTRL-C to stop data collection and run the final calibration.")
try:
mc = MagCalibrator(plot_results=args.plot)
mc.start_collect()
if args.plot:
mc.plotter.show()
else:
while True:
time.sleep(2)
mc.print_min_max_guess()
except KeyboardInterrupt:
print("Stopping on request")
mc.stop_collect()
mc.calibrate()
mc.shutdown()
| gpl-2.0 |
sserrot/champion_relationships | venv/Lib/site-packages/ipywidgets/widgets/interaction.py | 1 | 20907 | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Interact with functions using widgets."""
from __future__ import print_function
from __future__ import division
try: # Python >= 3.3
from inspect import signature, Parameter
except ImportError:
from IPython.utils.signatures import signature, Parameter
from inspect import getcallargs
try:
from inspect import getfullargspec as check_argspec
except ImportError:
from inspect import getargspec as check_argspec # py2
import sys
from IPython.core.getipython import get_ipython
from . import (ValueWidget, Text,
FloatSlider, IntSlider, Checkbox, Dropdown,
VBox, Button, DOMWidget, Output)
from IPython.display import display, clear_output
from ipython_genutils.py3compat import string_types, unicode_type
from traitlets import HasTraits, Any, Unicode, observe
from numbers import Real, Integral
from warnings import warn
try:
from collections.abc import Iterable, Mapping
except ImportError:
from collections import Iterable, Mapping # py2
empty = Parameter.empty
def show_inline_matplotlib_plots():
"""Show matplotlib plots immediately if using the inline backend.
With ipywidgets 6.0, matplotlib plots don't work well with interact when
using the inline backend that comes with ipykernel. Basically, the inline
backend only shows the plot after the entire cell executes, which does not
play well with drawing plots inside of an interact function. See
https://github.com/jupyter-widgets/ipywidgets/issues/1181/ and
https://github.com/ipython/ipython/issues/10376 for more details. This
function displays any matplotlib plots if the backend is the inline backend.
"""
if 'matplotlib' not in sys.modules:
# matplotlib hasn't been imported, nothing to do.
return
try:
import matplotlib as mpl
from ipykernel.pylab.backend_inline import flush_figures
except ImportError:
return
if mpl.get_backend() == 'module://ipykernel.pylab.backend_inline':
flush_figures()
def interactive_output(f, controls):
"""Connect widget controls to a function.
This function does not generate a user interface for the widgets (unlike `interact`).
This enables customisation of the widget user interface layout.
The user interface layout must be defined and displayed manually.
"""
out = Output()
def observer(change):
kwargs = {k:v.value for k,v in controls.items()}
show_inline_matplotlib_plots()
with out:
clear_output(wait=True)
f(**kwargs)
show_inline_matplotlib_plots()
for k,w in controls.items():
w.observe(observer, 'value')
show_inline_matplotlib_plots()
observer(None)
return out
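# Illustrative sketch, not part of the original module: typical use of
# interactive_output with a manually arranged user interface. The widget
# names, the add() function and the VBox layout are assumptions used only
# for demonstration.
def _demo_interactive_output():
    a = IntSlider(description='a')
    b = IntSlider(description='b')
    def add(a, b):
        print(a + b)
    out = interactive_output(add, {'a': a, 'b': b})
    # The caller decides how the controls and the output widget are displayed.
    return VBox([a, b, out])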
def _matches(o, pattern):
"""Match a pattern of types in a sequence."""
if not len(o) == len(pattern):
return False
comps = zip(o,pattern)
return all(isinstance(obj,kind) for obj,kind in comps)
def _get_min_max_value(min, max, value=None, step=None):
"""Return min, max, value given input values with possible None."""
# Either min and max need to be given, or value needs to be given
if value is None:
if min is None or max is None:
raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))
diff = max - min
value = min + (diff / 2)
# Ensure that value has the same type as diff
if not isinstance(value, type(diff)):
value = min + (diff // 2)
else: # value is not None
if not isinstance(value, Real):
raise TypeError('expected a real number, got: %r' % value)
# Infer min/max from value
if value == 0:
# This gives (0, 1) of the correct type
vrange = (value, value + 1)
elif value > 0:
vrange = (-value, 3*value)
else:
vrange = (3*value, -value)
if min is None:
min = vrange[0]
if max is None:
max = vrange[1]
if step is not None:
# ensure value is on a step
tick = int((value - min) / step)
value = min + tick * step
if not min <= value <= max:
raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))
return min, max, value
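# Illustrative sketch, not part of the original module: the ranges that
# _get_min_max_value infers from a single abbreviation value (the numbers are
# arbitrary examples). A positive value v maps to the range (-v, 3*v), and
# floats behave the same way with float bounds.
def _demo_get_min_max_value():
    assert _get_min_max_value(None, None, 10) == (-10, 30, 10)
    assert _get_min_max_value(None, None, 0.5) == (-0.5, 1.5, 0.5)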
def _yield_abbreviations_for_parameter(param, kwargs):
"""Get an abbreviation for a function parameter."""
name = param.name
kind = param.kind
ann = param.annotation
default = param.default
not_found = (name, empty, empty)
if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY):
if name in kwargs:
value = kwargs.pop(name)
elif ann is not empty:
warn("Using function annotations to implicitly specify interactive controls is deprecated. Use an explicit keyword argument for the parameter instead.", DeprecationWarning)
value = ann
elif default is not empty:
value = default
else:
yield not_found
yield (name, value, default)
elif kind == Parameter.VAR_KEYWORD:
# In this case name=kwargs and we yield the items in kwargs with their keys.
for k, v in kwargs.copy().items():
kwargs.pop(k)
yield k, v, empty
class interactive(VBox):
"""
A VBox container containing a group of interactive widgets tied to a
function.
Parameters
----------
__interact_f : function
The function to which the interactive widgets are tied. The `**kwargs`
should match the function signature.
__options : dict
A dict of options. Currently, the only supported keys are
``"manual"`` and ``"manual_name"``.
**kwargs : various, optional
An interactive widget is created for each keyword argument that is a
valid widget abbreviation.
Note that the first two parameters intentionally start with a double
underscore to avoid being mixed up with keyword arguments passed by
``**kwargs``.
"""
def __init__(self, __interact_f, __options={}, **kwargs):
VBox.__init__(self, _dom_classes=['widget-interact'])
self.result = None
self.args = []
self.kwargs = {}
self.f = f = __interact_f
self.clear_output = kwargs.pop('clear_output', True)
self.manual = __options.get("manual", False)
self.manual_name = __options.get("manual_name", "Run Interact")
self.auto_display = __options.get("auto_display", False)
new_kwargs = self.find_abbreviations(kwargs)
# Before we proceed, let's make sure that the user has passed a set of args+kwargs
# that will lead to a valid call of the function. This protects against unspecified
# and doubly-specified arguments.
try:
check_argspec(f)
except TypeError:
# if we can't inspect, we can't validate
pass
else:
getcallargs(f, **{n:v for n,v,_ in new_kwargs})
# Now build the widgets from the abbreviations.
self.kwargs_widgets = self.widgets_from_abbreviations(new_kwargs)
# This has to be done as an assignment, not using self.children.append,
# so that traitlets notices the update. We skip any objects (such as fixed) that
# are not DOMWidgets.
c = [w for w in self.kwargs_widgets if isinstance(w, DOMWidget)]
# If we are only to run the function on demand, add a button to request this.
if self.manual:
self.manual_button = Button(description=self.manual_name)
c.append(self.manual_button)
self.out = Output()
c.append(self.out)
self.children = c
# Wire up the widgets
# If we are doing manual running, the callback is only triggered by the button
# Otherwise, it is triggered for every trait change received
# On-demand running also suppresses running the function with the initial parameters
if self.manual:
self.manual_button.on_click(self.update)
# Also register input handlers on text areas, so the user can hit return to
# invoke execution.
for w in self.kwargs_widgets:
if isinstance(w, Text):
w.on_submit(self.update)
else:
for widget in self.kwargs_widgets:
widget.observe(self.update, names='value')
self.on_displayed(self.update)
# Callback function
def update(self, *args):
"""
Call the interact function and update the output widget with
the result of the function call.
Parameters
----------
*args : ignored
Required for this method to be used as traitlets callback.
"""
self.kwargs = {}
if self.manual:
self.manual_button.disabled = True
try:
show_inline_matplotlib_plots()
with self.out:
if self.clear_output:
clear_output(wait=True)
for widget in self.kwargs_widgets:
value = widget.get_interact_value()
self.kwargs[widget._kwarg] = value
self.result = self.f(**self.kwargs)
show_inline_matplotlib_plots()
if self.auto_display and self.result is not None:
display(self.result)
except Exception as e:
ip = get_ipython()
if ip is None:
self.log.warn("Exception in interact callback: %s", e, exc_info=True)
else:
ip.showtraceback()
finally:
if self.manual:
self.manual_button.disabled = False
# Find abbreviations
def signature(self):
return signature(self.f)
def find_abbreviations(self, kwargs):
"""Find the abbreviations for the given function and kwargs.
Return (name, abbrev, default) tuples.
"""
new_kwargs = []
try:
sig = self.signature()
except (ValueError, TypeError):
# can't inspect, no info from function; only use kwargs
return [ (key, value, value) for key, value in kwargs.items() ]
for param in sig.parameters.values():
for name, value, default in _yield_abbreviations_for_parameter(param, kwargs):
if value is empty:
raise ValueError('cannot find widget or abbreviation for argument: {!r}'.format(name))
new_kwargs.append((name, value, default))
return new_kwargs
# Abbreviations to widgets
def widgets_from_abbreviations(self, seq):
"""Given a sequence of (name, abbrev, default) tuples, return a sequence of Widgets."""
result = []
for name, abbrev, default in seq:
widget = self.widget_from_abbrev(abbrev, default)
if not (isinstance(widget, ValueWidget) or isinstance(widget, fixed)):
if widget is None:
raise ValueError("{!r} cannot be transformed to a widget".format(abbrev))
else:
raise TypeError("{!r} is not a ValueWidget".format(widget))
if not widget.description:
widget.description = name
widget._kwarg = name
result.append(widget)
return result
@classmethod
def widget_from_abbrev(cls, abbrev, default=empty):
"""Build a ValueWidget instance given an abbreviation or Widget."""
if isinstance(abbrev, ValueWidget) or isinstance(abbrev, fixed):
return abbrev
if isinstance(abbrev, tuple):
widget = cls.widget_from_tuple(abbrev)
if default is not empty:
try:
widget.value = default
except Exception:
# ignore failure to set default
pass
return widget
# Try single value
widget = cls.widget_from_single_value(abbrev)
if widget is not None:
return widget
# Something iterable (list, dict, generator, ...). Note that str and
# tuple should be handled before, that is why we check this case last.
if isinstance(abbrev, Iterable):
widget = cls.widget_from_iterable(abbrev)
if default is not empty:
try:
widget.value = default
except Exception:
# ignore failure to set default
pass
return widget
# No idea...
return None
@staticmethod
def widget_from_single_value(o):
"""Make widgets from single values, which can be used as parameter defaults."""
if isinstance(o, string_types):
return Text(value=unicode_type(o))
elif isinstance(o, bool):
return Checkbox(value=o)
elif isinstance(o, Integral):
min, max, value = _get_min_max_value(None, None, o)
return IntSlider(value=o, min=min, max=max)
elif isinstance(o, Real):
min, max, value = _get_min_max_value(None, None, o)
return FloatSlider(value=o, min=min, max=max)
else:
return None
@staticmethod
def widget_from_tuple(o):
"""Make widgets from a tuple abbreviation."""
if _matches(o, (Real, Real)):
min, max, value = _get_min_max_value(o[0], o[1])
if all(isinstance(_, Integral) for _ in o):
cls = IntSlider
else:
cls = FloatSlider
return cls(value=value, min=min, max=max)
elif _matches(o, (Real, Real, Real)):
step = o[2]
if step <= 0:
raise ValueError("step must be >= 0, not %r" % step)
min, max, value = _get_min_max_value(o[0], o[1], step=step)
if all(isinstance(_, Integral) for _ in o):
cls = IntSlider
else:
cls = FloatSlider
return cls(value=value, min=min, max=max, step=step)
@staticmethod
def widget_from_iterable(o):
"""Make widgets from an iterable. This should not be done for
a string or tuple."""
# Dropdown expects a dict or list, so we convert an arbitrary
# iterable to either of those.
if isinstance(o, (list, dict)):
return Dropdown(options=o)
elif isinstance(o, Mapping):
return Dropdown(options=list(o.items()))
else:
return Dropdown(options=list(o))
# Return a factory for interactive functions
@classmethod
def factory(cls):
options = dict(manual=False, auto_display=True, manual_name="Run Interact")
return _InteractFactory(cls, options)
class _InteractFactory(object):
"""
Factory for instances of :class:`interactive`.
This class is needed to support options like::
>>> @interact.options(manual=True)
... def greeting(text="World"):
... print("Hello {}".format(text))
Parameters
----------
cls : class
The subclass of :class:`interactive` to construct.
options : dict
A dict of options used to construct the interactive
function. By default, this is returned by
``cls.default_options()``.
kwargs : dict
A dict of **kwargs to use for widgets.
"""
def __init__(self, cls, options, kwargs={}):
self.cls = cls
self.opts = options
self.kwargs = kwargs
def widget(self, f):
"""
Return an interactive function widget for the given function.
The widget is only constructed, not displayed nor attached to
the function.
Returns
-------
An instance of ``self.cls`` (typically :class:`interactive`).
Parameters
----------
f : function
The function to which the interactive widgets are tied.
"""
return self.cls(f, self.opts, **self.kwargs)
def __call__(self, __interact_f=None, **kwargs):
"""
Make the given function interactive by adding and displaying
the corresponding :class:`interactive` widget.
Expects the first argument to be a function. Parameters to this
function are widget abbreviations passed in as keyword arguments
(``**kwargs``). Can be used as a decorator (see examples).
Returns
-------
f : __interact_f with interactive widget attached to it.
Parameters
----------
__interact_f : function
The function to which the interactive widgets are tied. The `**kwargs`
should match the function signature. Passed to :func:`interactive()`
**kwargs : various, optional
An interactive widget is created for each keyword argument that is a
valid widget abbreviation. Passed to :func:`interactive()`
Examples
--------
Render an interactive text field that shows the greeting with the passed in
text::
# 1. Using interact as a function
def greeting(text="World"):
print("Hello {}".format(text))
interact(greeting, text="IPython Widgets")
# 2. Using interact as a decorator
@interact
def greeting(text="World"):
print("Hello {}".format(text))
# 3. Using interact as a decorator with named parameters
@interact(text="IPython Widgets")
def greeting(text="World"):
print("Hello {}".format(text))
Render an interactive slider widget and prints square of number::
# 1. Using interact as a function
def square(num=1):
print("{} squared is {}".format(num, num*num))
interact(square, num=5)
# 2. Using interact as a decorator
@interact
def square(num=2):
print("{} squared is {}".format(num, num*num))
# 3. Using interact as a decorator with named parameters
@interact(num=5)
def square(num=2):
print("{} squared is {}".format(num, num*num))
"""
# If kwargs are given, replace self by a new
# _InteractFactory with the updated kwargs
if kwargs:
kw = dict(self.kwargs)
kw.update(kwargs)
self = type(self)(self.cls, self.opts, kw)
f = __interact_f
if f is None:
# This branch handles the case 3
# @interact(a=30, b=40)
# def f(*args, **kwargs):
# ...
#
# Simply return the new factory
return self
# positional arg support in: https://gist.github.com/8851331
# Handle the cases 1 and 2
# 1. interact(f, **kwargs)
# 2. @interact
# def f(*args, **kwargs):
# ...
w = self.widget(f)
try:
f.widget = w
except AttributeError:
# some things (instancemethods) can't have attributes attached,
# so wrap in a lambda
f = lambda *args, **kwargs: __interact_f(*args, **kwargs)
f.widget = w
show_inline_matplotlib_plots()
display(w)
return f
def options(self, **kwds):
"""
Change options for interactive functions.
Returns
-------
A new :class:`_InteractFactory` which will apply the
options when called.
"""
opts = dict(self.opts)
for k in kwds:
try:
# Ensure that the key exists because we want to change
# existing options, not add new ones.
_ = opts[k]
except KeyError:
raise ValueError("invalid option {!r}".format(k))
opts[k] = kwds[k]
return type(self)(self.cls, opts, self.kwargs)
interact = interactive.factory()
interact_manual = interact.options(manual=True, manual_name="Run Interact")
class fixed(HasTraits):
"""A pseudo-widget whose value is fixed and never synced to the client."""
value = Any(help="Any Python object")
description = Unicode('', help="Any Python object")
def __init__(self, value, **kwargs):
super(fixed, self).__init__(value=value, **kwargs)
def get_interact_value(self):
"""Return the value for this widget which should be passed to
interactive functions. Custom widgets can change this method
to process the raw value ``self.value``.
"""
return self.value
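# Illustrative sketch, not part of the original module: `fixed` pins an
# argument so that no widget is generated for it, while other abbreviations
# still become widgets. The power() function is an assumption used only for
# demonstration.
def _demo_fixed_argument():
    def power(base, exponent):
        return base ** exponent
    # `base` stays fixed at 2; only `exponent` gets an IntSlider.
    return interactive(power, base=fixed(2), exponent=(0, 10))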
| mit |
crowdresearch/daemo | crowdsourcing/models.py | 2 | 38402 | import json
import os
import pandas as pd
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import models
from django.utils import timezone
from crowdsourcing.utils import get_delimiter, get_worker_cache
class TimeStampable(models.Model):
created_at = models.DateTimeField(auto_now_add=True, auto_now=False)
updated_at = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
abstract = True
class StripeObject(models.Model):
stripe_id = models.CharField(max_length=128, db_index=True)
stripe_data = JSONField(null=True)
class Meta:
abstract = True
class ArchiveQuerySet(models.query.QuerySet):
def active(self):
return self.filter(deleted_at__isnull=True)
def inactive(self):
return self.filter(deleted_at__isnull=False)
class Archivable(models.Model):
deleted_at = models.DateTimeField(null=True)
objects = ArchiveQuerySet.as_manager()
class Meta:
abstract = True
def delete(self, using=None, keep_parents=False):
self.archive()
def archive(self):
self.deleted_at = timezone.now()
self.save()
def hard_delete(self, using=None, keep_parents=False):
super(Archivable, self).delete()
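# Illustrative sketch, not part of the original app: how a model built on the
# mixins above soft-deletes rows. "Note" is a hypothetical model name, and the
# snippet is kept as a comment because defining a real model here would
# register it with Django.
#
#     class Note(TimeStampable, Archivable):
#         text = models.CharField(max_length=140)
#
#     note = Note.objects.create(text='hi')
#     note.delete()                  # sets deleted_at; the row is kept
#     Note.objects.active()          # excludes soft-deleted rows
#     note.hard_delete()             # actually removes the row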
class Activable(models.Model):
is_active = models.BooleanField(default=True)
class Meta:
abstract = True
class Verifiable(models.Model):
is_verified = models.BooleanField(default=False)
class Meta:
abstract = True
class Revisable(models.Model):
revised_at = models.DateTimeField(auto_now_add=True, auto_now=False)
revision_log = models.CharField(max_length=512, null=True, blank=True)
group_id = models.IntegerField(null=True, db_index=True)
class Meta:
abstract = True
class Region(TimeStampable):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the region!'})
code = models.CharField(max_length=16, error_messages={'required': 'Please specify the region code!'})
class Country(TimeStampable):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the country!'})
code = models.CharField(max_length=8, error_messages={'required': 'Please specify the country code!'})
region = models.ForeignKey(Region, related_name='countries', null=True, blank=True)
def __unicode__(self):
return u'%s' % (self.name,)
class City(TimeStampable):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the city!'})
state = models.CharField(max_length=64, blank=True)
state_code = models.CharField(max_length=64, blank=True)
country = models.ForeignKey(Country, related_name='cities')
def __unicode__(self):
return u'%s' % (self.name,)
class Address(TimeStampable):
street = models.CharField(max_length=128, blank=True, null=True)
city = models.ForeignKey(City, related_name='addresses', null=True, blank=True)
postal_code = models.CharField(null=True, blank=True, max_length=32)
def __unicode__(self):
return u'%s, %s, %s' % (self.street, self.city.name, self.city.country.name)
class Language(TimeStampable):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the language!'})
iso_code = models.CharField(max_length=8)
class Skill(TimeStampable, Archivable, Verifiable):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the skill name!"})
description = models.CharField(max_length=512, error_messages={'required': "Please enter the skill description!"})
parent = models.ForeignKey('self', related_name='skills', null=True)
class Role(TimeStampable, Archivable, Activable):
name = models.CharField(max_length=32, unique=True,
error_messages={'required': 'Please specify the role name!',
'unique': 'The role %(value)r already exists. Please provide another name!'
})
class Currency(TimeStampable):
name = models.CharField(max_length=32)
iso_code = models.CharField(max_length=8)
class Category(TimeStampable, Archivable):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the category name!"})
parent = models.ForeignKey('self', related_name='categories', null=True)
class UserRegistration(TimeStampable):
user = models.OneToOneField(User)
activation_key = models.CharField(max_length=40)
class RegistrationWhitelist(TimeStampable):
email = models.EmailField(db_index=True)
valid_from = models.DateTimeField(null=True)
class UserPasswordReset(TimeStampable):
user = models.OneToOneField(User)
reset_key = models.CharField(max_length=40)
class UserProfile(TimeStampable, Verifiable):
MALE = 'M'
FEMALE = 'F'
OTHER = 'O'
DO_NOT_STATE = ('DNS', 'Prefer not to specify')
GENDER = (
(MALE, 'Male'),
(FEMALE, 'Female'),
(OTHER, 'Other')
)
PERSONAL = 'personal'
PROFESSIONAL = 'professional'
OTHER = 'other'
RESEARCH = 'research'
PURPOSE_OF_USE = (
(PROFESSIONAL, 'Professional'),
(PERSONAL, 'personal'),
(RESEARCH, 'research'),
(OTHER, 'other')
)
ETHNICITY = (
('white', 'White'),
('hispanic', 'Hispanic'),
('black', 'Black'),
('islander', 'Native Hawaiian or Other Pacific Islander'),
('indian', 'Indian'),
('asian', 'Asian'),
('native', 'Native American or Alaska Native'),
('mixed', 'Mixed Race'),
('other', 'Other')
)
INCOME = (
('less_1k', 'Less than $1,000'),
('1k', '$1,000 - $1,999'),
('2.5k', '$2,500 - $4,999'),
('5k', '$5,000 - $7,499'),
('7.5k', '$7,500 - $9,999'),
('10k', '$10,000 - $14,999'),
('15k', '$15,000 - $24,999'),
('25k', '$25,000 - $39,999'),
('40k', '$40,000 - $59,999'),
('60k', '$60,000 - $74,999'),
('75k', '$75,000 - $99,999'),
('100k', '$100,000 - $149,999'),
('150k', '$150,000 - $199,999'),
('200k', '$200,000 - $299,999'),
('300k_more', '$300,000 or more')
)
EDUCATION = (
('some_high', 'Some High School, No Degree'),
('high', 'High School Degree or Equivalent'),
('some_college', 'Some College, No Degree'),
('associates', 'Associates Degree'),
('bachelors', 'Bachelors Degree'),
('masters', 'Graduate Degree, Masters'),
('doctorate', 'Graduate Degree, Doctorate')
)
user = models.OneToOneField(User, related_name='profile')
gender = models.CharField(max_length=1, choices=GENDER, blank=True, null=True)
purpose_of_use = models.CharField(max_length=64, choices=PURPOSE_OF_USE, blank=True, null=True)
ethnicity = models.CharField(max_length=8, choices=ETHNICITY, blank=True, null=True)
job_title = models.CharField(max_length=100, blank=True, null=True)
address = models.ForeignKey(Address, related_name='+', blank=True, null=True)
birthday = models.DateTimeField(blank=True, null=True)
nationality = models.ManyToManyField(Country, through='UserCountry')
languages = models.ManyToManyField(Language, through='UserLanguage')
picture = models.BinaryField(null=True)
last_active = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
is_worker = models.BooleanField(default=True)
is_requester = models.BooleanField(default=False)
income = models.CharField(max_length=9, choices=INCOME, blank=True, null=True)
education = models.CharField(max_length=12, choices=EDUCATION, blank=True, null=True)
unspecified_responses = JSONField(null=True)
handle = models.CharField(max_length=32, db_index=True, blank=False, unique=True)
class UserCountry(TimeStampable):
country = models.ForeignKey(Country)
user = models.ForeignKey(UserProfile)
class UserSkill(TimeStampable, Verifiable):
user = models.ForeignKey(User)
skill = models.ForeignKey(Skill)
level = models.IntegerField(default=0)
class Meta:
unique_together = ('user', 'skill')
class UserRole(TimeStampable):
user = models.ForeignKey(User)
role = models.ForeignKey(Role)
class UserLanguage(TimeStampable):
language = models.ForeignKey(Language)
user = models.ForeignKey(UserProfile)
class UserPreferences(TimeStampable):
user = models.OneToOneField(User, related_name='preferences')
language = models.ForeignKey(Language, null=True, blank=True)
currency = models.ForeignKey(Currency, null=True, blank=True)
login_alerts = models.SmallIntegerField(default=0)
auto_accept = models.BooleanField(default=False)
new_tasks_notifications = models.BooleanField(default=True)
    aux_attributes = JSONField(default=dict)
class Template(TimeStampable, Archivable, Revisable):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the template name!"})
owner = models.ForeignKey(User, related_name='templates')
source_html = models.TextField(default=None, null=True)
price = models.FloatField(default=0)
share_with_others = models.BooleanField(default=False)
class BatchFile(TimeStampable, Archivable):
name = models.CharField(max_length=256)
file = models.FileField(upload_to='project_files/')
format = models.CharField(max_length=8, default='csv')
number_of_rows = models.IntegerField(default=1, null=True)
column_headers = ArrayField(models.CharField(max_length=64))
first_row = JSONField(null=True, blank=True)
hash_sha512 = models.CharField(max_length=128, null=True, blank=True)
url = models.URLField(null=True, blank=True)
def parse_csv(self):
delimiter = get_delimiter(self.file.name)
df = pd.DataFrame(pd.read_csv(self.file, sep=delimiter, encoding='utf-8'))
df = df.where((pd.notnull(df)), None)
return df.to_dict(orient='records')
def delete(self, *args, **kwargs):
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
path = os.path.join(root, self.file.url[1:])
os.remove(path)
super(BatchFile, self).delete(*args, **kwargs)
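# Illustrative sketch, not part of the original app: the record layout that
# BatchFile.parse_csv produces, reproduced with pandas on an in-memory CSV.
# The column names and values are assumptions used only for demonstration.
def _demo_parse_csv_records():
    import io
    import pandas as pd
    buf = io.StringIO(u"product,price\nbook,10\npen,")
    df = pd.DataFrame(pd.read_csv(buf, sep=','))
    df = df.where((pd.notnull(df)), None)
    # Typically -> [{'product': 'book', 'price': 10.0},
    #               {'product': 'pen', 'price': None}]
    return df.to_dict(orient='records')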
class ProjectQueryset(models.query.QuerySet):
def active(self):
return self.filter(deleted_at__isnull=True)
def inactive(self):
return self.filter(deleted_at__isnull=False)
def filter_by_boomerang(self, worker, sort_by='-boomerang'):
worker_cache = get_worker_cache(worker.id)
worker_data = json.dumps(worker_cache)
# noinspection SqlResolve
query = '''
WITH projects AS (
SELECT
ratings.project_id,
ratings.min_rating new_min_rating,
requester_ratings.requester_rating,
requester_ratings.raw_rating,
p_available.remaining available_tasks
FROM crowdsourcing_project p
INNER JOIN (SELECT
p.id,
count(t.id) remaining
FROM crowdsourcing_task t INNER JOIN (SELECT
group_id,
max(id) id
FROM crowdsourcing_task
WHERE deleted_at IS NULL
GROUP BY group_id) t_max ON t_max.id = t.id
INNER JOIN crowdsourcing_project p ON p.id = t.project_id
INNER JOIN (
SELECT
t.group_id,
sum(t.own) own,
sum(t.others) others
FROM (
SELECT
t.group_id,
CASE WHEN (tw.worker_id = (%(worker_id)s) AND tw.status <> 6)
or tw.is_qualified is FALSE
THEN 1
ELSE 0 END own,
CASE WHEN (tw.worker_id IS NOT NULL AND tw.worker_id <> (%(worker_id)s))
AND tw.status NOT IN (4, 6, 7)
THEN 1
ELSE 0 END others
FROM crowdsourcing_task t
LEFT OUTER JOIN crowdsourcing_taskworker tw ON (t.id =
tw.task_id)
WHERE t.exclude_at IS NULL AND t.deleted_at IS NULL) t
GROUP BY t.group_id) t_count ON t_count.group_id = t.group_id
WHERE t_count.own = 0 AND t_count.others < p.repetition
GROUP BY p.id) p_available ON p_available.id = p.id
INNER JOIN (
SELECT
u.id,
u.username,
CASE WHEN e.id IS NOT NULL
THEN TRUE
ELSE FALSE END is_denied
FROM auth_user u
LEFT OUTER JOIN crowdsourcing_requesteraccesscontrolgroup g
ON g.requester_id = u.id AND g.type = 2 AND g.is_global = TRUE
LEFT OUTER JOIN crowdsourcing_workeraccesscontrolentry e
ON e.group_id = g.id AND e.worker_id = (%(worker_id)s)) requester
ON requester.id=p.owner_id
LEFT OUTER JOIN (
SELECT
qualification_id,
json_agg(i.expression::JSON) expressions
FROM crowdsourcing_qualificationitem i
where i.scope = 'project'
GROUP BY i.qualification_id
) quals
ON quals.qualification_id = p.qualification_id
INNER JOIN get_min_project_ratings() ratings
ON p.id = ratings.project_id
LEFT OUTER JOIN (
SELECT
requester_id,
requester_rating AS raw_rating,
CASE WHEN requester_rating IS NULL AND requester_avg_rating
IS NOT NULL
THEN requester_avg_rating
WHEN requester_rating IS NULL AND requester_avg_rating IS NULL
THEN 1.99
WHEN requester_rating IS NOT NULL AND requester_avg_rating IS NULL
THEN requester_rating
ELSE requester_rating + 0.1 * requester_avg_rating END requester_rating
FROM get_requester_ratings(%(worker_id)s)) requester_ratings
ON requester_ratings.requester_id = ratings.owner_id
INNER JOIN (SELECT
requester_id,
CASE WHEN worker_rating IS NULL AND worker_avg_rating
IS NOT NULL
THEN worker_avg_rating
WHEN worker_rating IS NULL AND worker_avg_rating IS NULL
THEN 1.99
WHEN worker_rating IS NOT NULL AND worker_avg_rating IS NULL
THEN worker_rating
ELSE worker_rating + 0.1 * worker_avg_rating END worker_rating
FROM get_worker_ratings(%(worker_id)s)) worker_ratings
ON worker_ratings.requester_id = ratings.owner_id
AND (worker_ratings.worker_rating >= ratings.min_rating or p.enable_boomerang is FALSE
or p.owner_id = %(worker_id)s)
WHERE coalesce(p.deadline, NOW() + INTERVAL '1 minute') > NOW() AND p.status = 3 AND deleted_at IS NULL
AND (requester.is_denied = FALSE OR p.enable_blacklist = FALSE)
AND is_worker_qualified(quals.expressions, (%(worker_data)s)::JSON)
ORDER BY requester_rating DESC, ratings.project_id desc
)
select p.id, p.name, p.price, p.owner_id, p.created_at, p.allow_feedback,
p.is_prototype, projects.requester_rating, projects.raw_rating, projects.available_tasks,
up.handle requester_handle, p.published_at
FROM crowdsourcing_project p
INNER JOIN crowdsourcing_userprofile up on up.user_id = p.owner_id
INNER JOIN projects ON projects.project_id = p.id ORDER BY case when %(sort_by)s='-boomerang'
then requester_rating when %(sort_by)s='-available_tasks' then available_tasks
when %(sort_by)s='-published_at' then 12 when %(sort_by)s='-price' then p.price
end desc nulls last, p.id desc;
'''
return self.raw(query, params={
'worker_id': worker.id,
'st_in_progress': Project.STATUS_IN_PROGRESS,
'worker_data': worker_data,
'sort_by': sort_by
})
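    # Usage sketch (not part of the original code): with this queryset attached
    # as the default manager (`objects = ProjectQueryset.as_manager()` on
    # Project below), a worker-facing view could call e.g.
    #   Project.objects.filter_by_boomerang(worker, sort_by='-available_tasks')
    # which returns a RawQuerySet ordered by the boomerang ranking query above.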
class Project(TimeStampable, Archivable, Revisable):
STATUS_DRAFT = 1
STATUS_PUBLISHED = 2
STATUS_IN_PROGRESS = 3
STATUS_COMPLETED = 4
STATUS_PAUSED = 5
STATUS_CROWD_REJECTED = 6
STATUS_ARCHIVED = 7
STATUS = (
(STATUS_DRAFT, 'Draft'),
(STATUS_PUBLISHED, 'Published'),
(STATUS_IN_PROGRESS, 'In Progress'),
(STATUS_COMPLETED, 'Completed'),
(STATUS_PAUSED, 'Paused'),
(STATUS_CROWD_REJECTED, 'Rejected'),
(STATUS_ARCHIVED, 'Archived'),
)
PERMISSION_ORW_WRW = 1
PERMISSION_OR_WRW = 2
PERMISSION_OR_WR = 3
PERMISSION_WR = 4
PERMISSION = (
(PERMISSION_ORW_WRW, 'Others:Read+Write::Workers:Read+Write'),
(PERMISSION_OR_WRW, 'Others:Read::Workers:Read+Write'),
(PERMISSION_OR_WR, 'Others:Read::Workers:Read'),
(PERMISSION_WR, 'Others:None::Workers:Read')
)
name = models.CharField(max_length=256, default="Untitled Project",
error_messages={'required': "Please enter the project name!"})
description = models.TextField(null=True, max_length=2048, blank=True)
owner = models.ForeignKey(User, related_name='projects')
parent = models.ForeignKey('self', related_name='projects', null=True, on_delete=models.SET_NULL)
template = models.ForeignKey(Template, null=True)
categories = models.ManyToManyField(Category, through='ProjectCategory')
keywords = models.TextField(null=True, blank=True)
status = models.IntegerField(choices=STATUS, default=STATUS_DRAFT)
qualification = models.ForeignKey('Qualification', null=True)
price = models.DecimalField(decimal_places=2, max_digits=19, null=True)
aux_attributes = JSONField(null=True, default={'sort_results_by': 'worker_id'})
repetition = models.IntegerField(default=1)
max_tasks = models.PositiveIntegerField(null=True, default=None)
is_micro = models.BooleanField(default=True)
is_prototype = models.BooleanField(default=True)
is_api_only = models.BooleanField(default=True)
is_paid = models.BooleanField(default=False)
is_review = models.BooleanField(default=False)
# has_review = models.BooleanField(default=False)
timeout = models.DurationField(null=True, default=settings.DEFAULT_TASK_TIMEOUT)
deadline = models.DateTimeField(null=True)
task_time = models.DurationField(null=True)
has_data_set = models.BooleanField(default=False)
data_set_location = models.CharField(max_length=256, null=True, blank=True)
batch_files = models.ManyToManyField(BatchFile, through='ProjectBatchFile')
min_rating = models.FloatField(default=3.0)
previous_min_rating = models.FloatField(default=3.0)
tasks_in_progress = models.IntegerField(default=0)
rating_updated_at = models.DateTimeField(auto_now_add=True, auto_now=False)
allow_feedback = models.BooleanField(default=True)
feedback_permissions = models.IntegerField(choices=PERMISSION, default=PERMISSION_ORW_WRW)
enable_blacklist = models.BooleanField(default=True)
enable_whitelist = models.BooleanField(default=True)
post_mturk = models.BooleanField(default=False)
publish_at = models.DateTimeField(null=True)
published_at = models.DateTimeField(null=True)
last_opened_at = models.DateTimeField(null=True)
allow_price_per_task = models.BooleanField(default=False)
task_price_field = models.CharField(max_length=32, null=True)
amount_due = models.DecimalField(decimal_places=2, max_digits=8, default=0)
discussion_link = models.TextField(null=True, blank=True)
topic_id = models.IntegerField(null=True, default=-1)
post_id = models.IntegerField(null=True, default=-1)
enable_boomerang = models.BooleanField(default=True)
objects = ProjectQueryset.as_manager()
class Meta:
index_together = [['deadline', 'status', 'min_rating', 'deleted_at'], ['owner', 'deleted_at', 'created_at']]
class ProjectWorkerToRate(TimeStampable):
project = models.ForeignKey(Project, on_delete=models.CASCADE)
batch = models.ForeignKey('Batch', on_delete=models.SET_NULL, null=True)
worker = models.ForeignKey(User)
class ProjectBatchFile(models.Model):
batch_file = models.ForeignKey(BatchFile, on_delete=models.CASCADE)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
class Meta:
unique_together = ('batch_file', 'project',)
class ProjectCategory(TimeStampable):
project = models.ForeignKey(Project)
category = models.ForeignKey(Category)
class Meta:
unique_together = ('category', 'project')
class TemplateItem(TimeStampable, Revisable):
ROLE_DISPLAY = 'display'
ROLE_INPUT = 'input'
ROLE = (
(ROLE_DISPLAY, 'Display'),
(ROLE_INPUT, 'Input'),
)
name = models.CharField(max_length=128, default='')
template = models.ForeignKey(Template, related_name='items', on_delete=models.CASCADE)
role = models.CharField(max_length=16, choices=ROLE, default=ROLE_DISPLAY)
type = models.CharField(max_length=16, db_index=True)
sub_type = models.CharField(max_length=16, null=True)
position = models.IntegerField(null=True)
required = models.BooleanField(default=True)
predecessor = models.ForeignKey('self', null=True, related_name='successors', on_delete=models.SET_NULL,
db_index=True)
aux_attributes = JSONField()
class Meta:
ordering = ['position']
class TemplateItemProperties(TimeStampable):
template_item = models.ForeignKey(TemplateItem, related_name='properties')
attribute = models.CharField(max_length=128)
operator = models.CharField(max_length=128)
value1 = models.CharField(max_length=128)
value2 = models.CharField(max_length=128)
class CollectiveRejection(TimeStampable, Archivable):
REASON_LOW_PAY = 1
REASON_INAPPROPRIATE = 2
REASON_OTHER = 3
REASON = (
(REASON_LOW_PAY, 'The pay is too low for the amount of work'),
(REASON_INAPPROPRIATE, 'The content is offensive or inappropriate'),
(REASON_OTHER, 'Other')
)
reason = models.IntegerField(choices=REASON)
detail = models.CharField(max_length=1024, null=True, blank=True)
class Batch(TimeStampable):
parent = models.ForeignKey('Batch', null=True)
class Task(TimeStampable, Archivable, Revisable):
project = models.ForeignKey(Project, related_name='tasks', on_delete=models.CASCADE)
data = JSONField(null=True)
exclude_at = models.ForeignKey(Project, related_name='excluded_tasks', db_column='exclude_at',
null=True, on_delete=models.SET_NULL)
row_number = models.IntegerField(null=True, db_index=True)
rerun_key = models.CharField(max_length=64, db_index=True, null=True)
batch = models.ForeignKey('Batch', related_name='tasks', null=True, on_delete=models.CASCADE)
hash = models.CharField(max_length=64, db_index=True)
min_rating = models.FloatField(default=3.0)
rating_updated_at = models.DateTimeField(auto_now=False, auto_now_add=False, null=True)
price = models.DecimalField(decimal_places=2, max_digits=19, null=True)
class Meta:
index_together = (('rerun_key', 'hash',),)
class TaskWorker(TimeStampable, Archivable, Revisable):
STATUS_IN_PROGRESS = 1
STATUS_SUBMITTED = 2
STATUS_ACCEPTED = 3
STATUS_REJECTED = 4
STATUS_RETURNED = 5
STATUS_SKIPPED = 6
STATUS_EXPIRED = 7
STATUS = (
(STATUS_IN_PROGRESS, 'In Progress'),
(STATUS_SUBMITTED, 'Submitted'),
(STATUS_ACCEPTED, 'Accepted'),
(STATUS_REJECTED, 'Rejected'),
(STATUS_RETURNED, 'Returned'),
(STATUS_SKIPPED, 'Skipped'),
(STATUS_EXPIRED, 'Expired'),
)
task = models.ForeignKey(Task, related_name='task_workers', on_delete=models.CASCADE)
worker = models.ForeignKey(User, related_name='task_workers')
status = models.IntegerField(choices=STATUS, default=STATUS_IN_PROGRESS, db_index=True)
is_paid = models.BooleanField(default=False)
paid_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
collective_rejection = models.OneToOneField(CollectiveRejection, null=True)
charge = models.ForeignKey('StripeCharge', null=True)
submitted_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True, db_index=True)
started_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
approved_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
returned_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
is_qualified = models.BooleanField(default=True, db_index=True)
attempt = models.SmallIntegerField(default=0)
auto_approved = models.BooleanField(default=False)
class Meta:
unique_together = ('task', 'worker')
class TaskWorkerSession(TimeStampable):
started_at = models.DateTimeField(auto_now_add=False, auto_now=False, db_index=True)
ended_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True, db_index=True)
task_worker = models.ForeignKey('TaskWorker', related_name='sessions')
class TaskWorkerResult(TimeStampable, Archivable):
task_worker = models.ForeignKey(TaskWorker, related_name='results', on_delete=models.CASCADE)
result = JSONField(null=True)
attachment = models.ForeignKey('FileResponse', null=True)
template_item = models.ForeignKey(TemplateItem, related_name='+')
class FileResponse(TimeStampable):
file = models.FileField(upload_to='responses/%Y/%m/%d/')
name = models.CharField(max_length=256)
owner = models.ForeignKey(User)
hash_sha512 = models.CharField(max_length=128, null=True, blank=True)
class WorkerProjectScore(TimeStampable):
project_group_id = models.IntegerField()
worker = models.ForeignKey(User, related_name='project_scores')
mu = models.FloatField(default=25.000)
sigma = models.FloatField(default=8.333)
class WorkerMatchScore(TimeStampable):
worker = models.ForeignKey(TaskWorker, related_name='match_scores')
project_score = models.ForeignKey(WorkerProjectScore, related_name='match_scores')
mu = models.FloatField()
sigma = models.FloatField()
class MatchGroup(TimeStampable):
batch = models.ForeignKey(Batch, related_name='match_group')
rerun_key = models.CharField(max_length=64, null=True, db_index=True)
hash = models.CharField(max_length=64, db_index=True)
parent = models.ForeignKey('self', related_name='children_groups', null=True)
class Meta:
index_together = (('rerun_key', 'hash',),)
class Match(TimeStampable):
STATUS_CREATED = 1
STATUS_COMPLETED = 2
STATUS = (
(STATUS_CREATED, 'Created'),
(STATUS_COMPLETED, 'Completed'),
)
status = models.IntegerField(choices=STATUS, default=STATUS_CREATED)
submitted_at = models.DateTimeField(null=True)
group = models.ForeignKey(MatchGroup, related_name='matches')
task = models.ForeignKey(Task, related_name='matches', null=True)
class MatchWorker(TimeStampable):
match = models.ForeignKey(Match, related_name='workers')
task_worker = models.ForeignKey(TaskWorker, related_name='matches')
mu = models.FloatField(null=True)
sigma = models.FloatField(null=True)
old_mu = models.FloatField(default=25.0, null=True)
old_sigma = models.FloatField(default=8.333, null=True)
class ActivityLog(TimeStampable):
"""
Track all user's activities: Create, Update and Delete
"""
activity = models.CharField(max_length=512)
author = models.ForeignKey(User, related_name='activities')
class Qualification(TimeStampable):
TYPE_STRICT = 1
TYPE_FLEXIBLE = 2
name = models.CharField(max_length=64, null=True)
description = models.CharField(max_length=512, null=True)
owner = models.ForeignKey(User, related_name='qualifications')
TYPE = (
(TYPE_STRICT, "Strict"),
(TYPE_FLEXIBLE, 'Flexible')
)
type = models.IntegerField(choices=TYPE, default=TYPE_STRICT)
class QualificationItem(TimeStampable):
qualification = models.ForeignKey(Qualification, related_name='items', on_delete=models.CASCADE)
expression = JSONField()
position = models.SmallIntegerField(null=True)
group = models.SmallIntegerField(default=1)
scope = models.CharField(max_length=32, default='project', db_index=True)
class Rating(TimeStampable):
RATING_WORKER = 1
RATING_REQUESTER = 2
RATING = (
(RATING_WORKER, "Worker"),
(RATING_REQUESTER, 'Requester')
)
origin = models.ForeignKey(User, related_name='ratings_to')
target = models.ForeignKey(User, related_name='ratings_from')
weight = models.FloatField(default=2)
origin_type = models.IntegerField(choices=RATING)
task = models.ForeignKey(Task, null=True)
class Meta:
index_together = [
['origin', 'target'],
['origin', 'target', 'updated_at', 'origin_type']
]
class RawRatingFeedback(TimeStampable):
requester = models.ForeignKey(User, related_name='raw_feedback')
worker = models.ForeignKey(User, related_name='+')
weight = models.FloatField(default=0)
task = models.ForeignKey(Task, null=True)
is_excluded = models.BooleanField(default=False)
class Meta:
unique_together = ('requester', 'worker', 'task')
index_together = ('requester', 'worker', 'task', 'is_excluded')
class BoomerangLog(TimeStampable):
object_id = models.PositiveIntegerField()
object_type = models.CharField(max_length=8, default='project')
min_rating = models.FloatField(default=3.0)
rating_updated_at = models.DateTimeField(auto_now=False, auto_now_add=False, null=True)
reason = models.CharField(max_length=64, null=True)
class Conversation(TimeStampable, Archivable):
subject = models.CharField(max_length=64)
sender = models.ForeignKey(User, related_name='conversations')
recipients = models.ManyToManyField(User, through='ConversationRecipient')
class ConversationRecipient(TimeStampable, Archivable):
STATUS_OPEN = 1
STATUS_MINIMIZED = 2
STATUS_CLOSED = 3
STATUS_MUTED = 4
STATUS = (
(STATUS_OPEN, "Open"),
(STATUS_MINIMIZED, 'Minimized'),
(STATUS_CLOSED, 'Closed'),
(STATUS_MUTED, 'Muted')
)
recipient = models.ForeignKey(User)
conversation = models.ForeignKey(Conversation, on_delete=models.CASCADE)
status = models.SmallIntegerField(choices=STATUS, default=STATUS_OPEN)
class Message(TimeStampable, Archivable):
conversation = models.ForeignKey(Conversation, related_name='messages', on_delete=models.CASCADE)
sender = models.ForeignKey(User, related_name='messages')
body = models.TextField(max_length=8192)
recipients = models.ManyToManyField(User, through='MessageRecipient')
class MessageRecipient(TimeStampable, Archivable):
STATUS_SENT = 1
STATUS_DELIVERED = 2
STATUS_READ = 3
STATUS = (
(STATUS_SENT, 'Sent'),
(STATUS_DELIVERED, 'Delivered'),
(STATUS_READ, 'Read')
)
message = models.ForeignKey(Message, on_delete=models.CASCADE)
recipient = models.ForeignKey(User)
status = models.IntegerField(choices=STATUS, default=STATUS_SENT)
delivered_at = models.DateTimeField(blank=True, null=True)
read_at = models.DateTimeField(blank=True, null=True)
class EmailNotification(TimeStampable):
# use updated_at to check last notification sent
recipient = models.OneToOneField(User)
class Comment(TimeStampable, Archivable):
sender = models.ForeignKey(User, related_name='comments')
body = models.TextField(max_length=8192)
parent = models.ForeignKey('self', related_name='comments', null=True)
class Meta:
ordering = ['created_at']
class ProjectComment(TimeStampable, Archivable):
project = models.ForeignKey(Project, related_name='comments')
comment = models.ForeignKey(Comment)
ready_for_launch = models.NullBooleanField()
aux_attributes = JSONField(default={}, null=True)
class TaskComment(TimeStampable, Archivable):
task = models.ForeignKey(Task, related_name='comments')
comment = models.ForeignKey(Comment)
class FinancialAccount(TimeStampable, Activable):
TYPE_WORKER = 1
TYPE_REQUESTER = 2
TYPE_ESCROW = 3
TYPE = (
(TYPE_WORKER, 'Earnings'),
(TYPE_REQUESTER, 'Deposits'),
(TYPE_ESCROW, 'Escrow')
)
owner = models.ForeignKey(User, related_name='financial_accounts', null=True)
type = models.IntegerField(choices=TYPE)
balance = models.DecimalField(default=0, decimal_places=4, max_digits=19)
is_system = models.BooleanField(default=False)
class RequesterAccessControlGroup(TimeStampable):
TYPE_ALLOW = 1
TYPE_DENY = 2
TYPE = (
(TYPE_ALLOW, "allow"),
(TYPE_DENY, "deny")
)
requester = models.ForeignKey(User, related_name="access_groups")
type = models.SmallIntegerField(choices=TYPE, default=TYPE_ALLOW)
name = models.CharField(max_length=256, null=True)
is_global = models.BooleanField(default=False)
class Meta:
index_together = [['requester', 'type', 'is_global']]
class WorkerAccessControlEntry(TimeStampable):
worker = models.ForeignKey(User)
group = models.ForeignKey(RequesterAccessControlGroup, related_name='entries')
class Meta:
unique_together = ('group', 'worker')
index_together = [['group', 'worker']]
class ReturnFeedback(TimeStampable, Archivable):
body = models.TextField(max_length=8192)
task_worker = models.ForeignKey(TaskWorker, related_name='return_feedback', on_delete=models.CASCADE)
notification_sent = models.BooleanField(default=False, db_index=True)
notification_sent_at = models.DateTimeField(null=True, auto_now_add=False, auto_now=False)
class Meta:
ordering = ['-created_at']
class Error(TimeStampable, Archivable):
code = models.CharField(max_length=16)
message = models.CharField(max_length=256)
trace = models.CharField(max_length=4096, null=True)
owner = models.ForeignKey(User, null=True, related_name='errors')
class StripeAccount(TimeStampable, Verifiable, StripeObject):
owner = models.OneToOneField(User, related_name='stripe_account')
class StripeCustomer(TimeStampable, StripeObject):
owner = models.OneToOneField(User, related_name='stripe_customer')
account_balance = models.IntegerField(default=0)
class StripeCharge(TimeStampable, StripeObject):
customer = models.ForeignKey(StripeCustomer, related_name='charges')
expired = models.BooleanField(default=False)
expired_at = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
balance = models.IntegerField()
discount_applied = models.BooleanField(default=False)
raw_amount = models.IntegerField()
discount = models.FloatField(default=1.0)
class Meta:
index_together = (('created_at',), ('created_at', 'customer'))
class StripeRefund(TimeStampable, StripeObject):
charge = models.ForeignKey(StripeCharge, related_name='refunds')
class StripeTransfer(TimeStampable, StripeObject):
destination = models.ForeignKey(User, related_name='received_transfers')
class StripeTransferReversal(TimeStampable, StripeObject):
transfer = models.ForeignKey(StripeTransfer, related_name='reversals')
class ProjectNotificationPreference(TimeStampable):
project_group_id = models.IntegerField()
worker = models.ForeignKey(User, related_name='notification_preferences')
notify = models.BooleanField(default=True)
class Meta:
unique_together = ('project_group_id', 'worker')
class WorkerProjectNotification(TimeStampable):
project = models.ForeignKey('Project')
worker = models.ForeignKey(User, related_name='project_notifications')
class WorkerBonus(TimeStampable):
worker = models.ForeignKey(User, related_name='bonuses_received')
requester = models.ForeignKey(User, related_name='bonuses_given')
reason = models.CharField(max_length=256, null=True, blank=True)
    project = models.ForeignKey(Project, related_name='worker_bonuses', null=True)
charge = models.ForeignKey('StripeCharge', null=True)
amount = models.IntegerField()
class ProjectPreview(TimeStampable):
project = models.ForeignKey('Project')
user = models.ForeignKey(User)
| mit |
sealhuang/brainDecodingToolbox | braincode/vim2/util.py | 3 | 14748 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import glob
import numpy as np
import nibabel as nib
import matplotlib.pylab as plt
import matplotlib.image as mpimg
from braincode.math import corr2_coef, make_2d_gaussian, make_2d_dog, make_2d_log
def idx2coord(vec_idx):
"""Convert row index in response data matrix into 3D coordinate in
(original) ROI volume.
"""
data_size = (18, 64, 64)
coord_z = vec_idx % data_size[2]
coord_x = vec_idx / (data_size[1]*data_size[2])
coord_y = (vec_idx % (data_size[1]*data_size[2])) / data_size[2]
return (coord_x, coord_y, coord_z)
def coord2idx(coord):
"""Convert a 3D coordinate from nifti file into row index in response
data matrix.
Input must be a tuple.
"""
ncoord = (coord[2], coord[0], 63-coord[1])
return ncoord[2]+ncoord[0]*64*64+ncoord[1]*64
def node2feature(node_idx, data_shape):
"""Convert node index from CNN activation vector into 3 features including
index of channel, row and column position of the filter.
Return a tuple of (channel index, row index, column index).
"""
#data_size = {'conv1': [96, 55, 55],
# 'conv2': [256, 27, 27],
# 'conv3': [384, 13, 13],
# 'conv4': [384, 13, 13],
# 'cpnv5': [256, 13, 13],
# 'pool5': [256, 6, 6]}
#s = data_size[layer_name]
s = data_shape
col_idx = node_idx % s[2]
channel_idx = node_idx / (s[1]*s[2])
row_idx = (node_idx % (s[1]*s[2])) / s[2]
return (channel_idx, row_idx, col_idx)
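# Illustrative check (relies on Python 2 integer division, as the code above does):
#   node2feature(56, (96, 55, 55)) -> (0, 1, 1)
# i.e. flat node index 56 maps to channel 0, row 1, column 1 of a 55x55 feature map.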
def vxl_data2nifti(data, vxl_idx, out_file):
"""Save data according to its voxel index into a nifti file."""
data_mtx = np.zeros((18, 64, 64))
data_mtx[:] = np.nan
data_mtx = data_mtx.flatten()
data_mtx[vxl_idx] = data
save2nifti(data_mtx.reshape(18, 64, 64), out_file)
def save2nifti(data, filename):
"""Save 3D data as nifti file.
Original data shape is (18, 64, 64), and the resulting data shape is
    (64, 64, 18), whose orientation is SRP."""
# roll axis
ndata = np.rollaxis(data, 0, 3)
ndata = ndata[:, ::-1, :]
# generate affine matrix
aff = np.zeros((4, 4))
aff[0, 1] = 2
aff[1, 2] = -2.5
aff[2, 0] = 2
aff[3, 3] = 1
img = nib.Nifti1Image(ndata, aff)
nib.save(img, filename)
def mask2nifti(data, filename):
"""Save 3D mask derived from pycortex as nifti file.
Original data shape is (18, 64, 64), and the resulting data shape is
    (64, 64, 18), whose orientation is SRP."""
# roll axis
data = data.astype('<f8')
ndata = np.rollaxis(data, 0, 3)
ndata = np.rollaxis(ndata, 0, 2)
ndata = ndata[:, ::-1, :]
# generate affine matrix
aff = np.zeros((4, 4))
aff[0, 1] = 2
aff[1, 2] = -2.5
aff[2, 0] = 2
aff[3, 3] = 1
img = nib.Nifti1Image(ndata, aff)
nib.save(img, filename)
def plot_prf(prf_file):
"""Plot pRF."""
prf_data = np.load(prf_file)
vxl = prf_data[..., 0]
# figure config
for f in range(96):
fig, axs = plt.subplots(5, 8)
for t in range(40):
tmp = vxl[:, t].reshape(96, 55, 55)
tmp = tmp[f, :]
im = axs[t/8][t%8].imshow(tmp, interpolation='nearest',
cmap=plt.cm.ocean,
vmin=-0.2, vmax=0.3)
fig.colorbar(im)
#plt.show()
fig.savefig('%s.png'%(f))
def channel_sim(feat_file):
"""Compute similarity between each pair of channels."""
feat = np.load(feat_file)
print feat.shape
feat = feat.reshape(96, 55, 55, 540)
simmtx = np.zeros((feat.shape[0], feat.shape[0]))
for i in range(feat.shape[0]):
for j in range(i+1, feat.shape[0]):
print '%s - %s' %(i, j)
x = feat[i, :].reshape(-1, feat.shape[3])
y = feat[j, :].reshape(-1, feat.shape[3])
tmp = corr2_coef(x, y)
tmp = tmp.diagonal()
simmtx[i, j] = tmp.mean()
np.save('sim_mtx.npy', simmtx)
im = plt.imshow(simmtx, interpolation='nearest', cmap=plt.cm.ocean)
plt.colorbar(im)
plt.show()
def data_swap(nifti_file):
"""Convert nifti data into original data shape."""
data = nib.load(nifti_file).get_data()
ndata = data[:, ::-1, :]
ndata = np.rollaxis(ndata, 0, 3)
ndata = np.rollaxis(ndata, 0, 3)
return ndata
def nifti4pycortex(nifti_file):
"""Load nifti file for pycortex visualization."""
data = nib.load(nifti_file).get_data()
ndata = np.rollaxis(data, 0, 3)
ndata = np.rollaxis(ndata, 0, 2)
return ndata
def plot_cca_fweights(data, out_dir, prefix_name, two_side=False):
"""Plot features weights derived from CCA."""
if len(data.shape)==3:
data = np.expand_dims(data, axis=3)
n_components = data.shape[3]
n_channels = data.shape[0]
for f in range(n_components):
fig, axs = plt.subplots(8, 12)
cdata = data[..., f]
if two_side:
maxv = max(cdata.max(), -1*cdata.min())
minv = -1 * maxv
else:
maxv = cdata.max()
minv = cdata.min()
for c in range(n_channels):
tmp = cdata[c, ...]
im = axs[c/12][c%12].imshow(tmp, interpolation='nearest',
vmin=minv, vmax=maxv)
axs[c/12][c%12].get_xaxis().set_visible(False)
axs[c/12][c%12].get_yaxis().set_visible(False)
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.88, 0.2, 0.03, 0.6])
fig.colorbar(im, cax=cbar_ax)
fig.savefig(os.path.join(out_dir, prefix_name+'_%s.png'%(f+1)))
def plot_avg_weights_pattern(feat_weights, top_channels_num=None):
"""Plot average features weights derived from CCA."""
if len(feat_weights.shape)==3:
feat_weights = np.expand_dims(feat_weights, axis=3)
n_components = feat_weights.shape[3]
n_channels = feat_weights.shape[0]
if top_channels_num and top_channels_num <= n_channels:
avg_weights = feat_weights[:top_channels_num, ...].mean(axis=0)
else:
avg_weights = feat_weights.mean(axis=0)
maxv = avg_weights.max()
minv = avg_weights.min()
fig, axs = plt.subplots(2, 5)
for f in range(n_components):
cdata = avg_weights[..., f]
im = axs[f/5][f%5].imshow(cdata, interpolation='nearest',
vmin=minv, vmax=maxv)
axs[f/5][f%5].get_xaxis().set_visible(False)
axs[f/5][f%5].get_yaxis().set_visible(False)
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.88, 0.2, 0.03, 0.6])
fig.colorbar(im, cax=cbar_ax)
fig.show()
def save_cca_volweights(fmri_weights, mask_file, out_dir, prefix_name,
out_png=True, two_side=False):
"""Save fmri weights derived from CCA as nifti files."""
n_components = fmri_weights.shape[1]
mask = data_swap(mask_file)
vxl_idx = np.nonzero(mask.flatten()==1)[0]
for i in range(n_components):
tmp = np.zeros_like(mask.flatten(), dtype=np.float64)
tmp[vxl_idx] = fmri_weights[:, i]
tmp = tmp.reshape(mask.shape)
nii_file = os.path.join(out_dir, prefix_name+'%s.nii.gz'%(i+1))
save2nifti(tmp, nii_file)
if out_png:
import cortex
from matplotlib import cm
subj_id = out_dir.split('/')[-3]
if two_side:
img = cortex.quickflat.make_figure(cortex.Volume(nii_file,
subj_id, 'func2anat', cmap=cm.bwr,
vmin=-1., vmax=1.),
with_curvature=True)
else:
img = cortex.quickflat.make_figure(cortex.Volume(nii_file,
subj_id, 'func2anat', cmap=cm.hot,
vmin=0., vmax=1.),
with_curvature=True)
png_file = os.path.join(out_dir, prefix_name+'%s.png'%(i+1))
img.savefig(png_file, dpi=200)
def display_video(dataset):
"""Display 3D video."""
plt.ion()
for i in range(dataset.shape[2]):
plt.imshow(dataset[:, i])
plt.pause(0.05)
def plot_kernerls(in_dir, basename, filename):
"""Plot several kernel images in one screen."""
file_num = len(glob.glob(os.path.join(in_dir, basename+'*')))
fig, axs = plt.subplots(8, 12)
for n in range(file_num):
f = os.path.join(in_dir, basename+str(n)+'.png')
img = mpimg.imread(f)
# normalize image into zero-one range
nimg = (img - img.min()) / (img.max() - img.min())
im = axs[n/12][n%12].imshow(nimg)
axs[n/12][n%12].get_xaxis().set_visible(False)
axs[n/12][n%12].get_yaxis().set_visible(False)
fig.savefig(os.path.join(in_dir, filename))
def save_imshow(data, filename, val_range=None):
"""Save `imshow` figure as file."""
fig, ax = plt.subplots()
if val_range:
vmin = val_range[0]
vmax = val_range[1]
else:
vmin = data.min()
vmax = data.max()
cax = ax.imshow(data.astype(np.float64), vmin=vmin, vmax=vmax, cmap='gray')
fig.colorbar(cax)
fig.savefig(filename)
plt.close()
def save_hue(data, filename):
"""Save hue tune for a voxel."""
fig, ax = plt.subplots()
x = np.linspace(0, 2*np.pi, 201)
ax.plot(x, data)
fig.savefig(filename)
plt.close()
def fweights_bar(feat_weights):
"""Bar plots for feature weights derived from CCA.
For each feature/2D feature map, top 20% `abs` weights are averaged
for evaluation.
"""
avg_weights = fweights_top_mean(feat_weights, 0.2)
cc_num = avg_weights.shape[0]
fig, axs = plt.subplots(cc_num, 1)
for i in range(cc_num):
        ind = np.arange(avg_weights.shape[1])  # one bar per channel
axs[i].bar(ind, avg_weights[i], 0.35)
plt.show()
def fweights_top_mean(feat_weights, top_ratio):
"""Derive average of top `top_ratio` weights from each channels."""
cc_num = feat_weights.shape[3]
channel_num = feat_weights.shape[0]
avg_weights = np.zeros((cc_num, channel_num))
for i in range(cc_num):
tmp = feat_weights[..., i]
for j in range(channel_num):
ctmp = np.abs(tmp[j, ...]).flatten()
ctmp.sort()
avg_weights[i, j] = ctmp[-1*int(ctmp.shape[0]*top_ratio):].mean()
return avg_weights
def roi2nifti(fmri_table, filename, mode='full'):
"""Save ROI as a nifti file.
`mode`: 'full' for whole ROIs mask creation.
'small' for mask creation for alignment.
"""
if mode=='full':
roi_label = {'v1lh': 1, 'v1rh': 2, 'v2lh': 3, 'v2rh': 4,
'v3lh': 5, 'v3rh': 6, 'v3alh': 7, 'v3arh': 8,
'v3blh': 9, 'v3brh': 10, 'v4lh': 11, 'v4rh': 12,
'latocclh': 13, 'latoccrh': 14, 'VOlh': 15, 'VOrh': 16,
'STSlh': 17, 'STSrh': 18, 'RSClh': 19, 'RSCrh': 20,
'PPAlh': 21, 'PPArh': 22, 'OBJlh': 23, 'OBJrh': 24,
'MTlh': 25, 'MTrh': 26, 'MTplh': 27, 'MTprh': 28,
'IPlh': 29, 'IPrh': 30, 'FFAlh': 31, 'FFArh': 32,
'EBAlh': 33, 'EBArh': 34, 'OFAlh': 35, 'OFArh': 36,
'v7alh': 37, 'v7arh': 38, 'v7blh': 39, 'v7brh': 40,
'v7clh': 41, 'v7crh': 42, 'v7lh': 43, 'v7rh': 44,
'IPS1lh': 45, 'IPS1rh': 46, 'IPS2lh': 47, 'IPS2rh': 48,
'IPS3lh': 49, 'IPS3rh': 50, 'IPS4lh': 51, 'IPS4rh': 52,
'MSTlh': 53, 'MSTrh': 54, 'TOSlh': 55, 'TOSrh': 56}
else:
roi_label = {'v1lh': 1, 'v1rh': 2, 'v2lh': 3, 'v2rh': 4,
'v3lh': 5, 'v3rh': 6, 'v3alh': 7, 'v3arh': 8,
'v3blh': 9, 'v3brh': 10, 'v4lh': 11, 'v4rh': 12,
'MTlh': 13, 'MTrh': 14, 'MTplh': 15, 'MTprh': 16}
roi_list = fmri_table.list_nodes('/roi')
roi_shape = roi_list[0].shape
roi_mask = np.zeros(roi_shape)
roi_list = [r.name for r in roi_list if r.name in roi_label]
for r in roi_list:
roi_mask += fmri_table.get_node('/roi/%s'%(r))[:] * roi_label[r]
save2nifti(roi_mask, filename)
def get_roi_mask(fmri_table, nifti=False):
"""Save ROIs as a mask."""
roi_list = fmri_table.list_nodes('/roi')
roi_shape = roi_list[0].shape
mask = np.zeros(roi_shape)
for r in roi_list:
mask += fmri_table.get_node('/roi/%s'%(r.name))[:]
if nifti:
save2nifti(mask, 'all_roi_mask.nii.gz')
else:
return mask.flatten()
def gen_mean_vol(fmri_table, dataset, filename):
"""Make a mean response map as a reference volume."""
data = fmri_table.get_node('/'+dataset)[:]
# replace nan to zero
data = np.nan_to_num(data)
mean_data = np.mean(data, axis=1)
vol = np.zeros((18, 64, 64))
for i in range(data.shape[0]):
        c = idx2coord(i)
vol[c[0], c[1], c[2]] = mean_data[i]
save2nifti(vol, filename)
def spatial_sim_seq(fmri_data):
"""Calculate spatial similarity between adjacent time points.
fmri_data : A 2D array, each row represents a voxel's time course.
"""
length = fmri_data.shape[1]
ssim_seq = np.zeros((length, ))
for i in range(1, length):
pdata = fmri_data[:, i-1]
ndata = fmri_data[:, i]
ssim_seq[i] = np.corrcoef(pdata, ndata)[0, 1]
return ssim_seq
def make_gaussian_prf(size):
"""Generate various pRFs based on 2d Gaussian kernel with different
parameters.
Return a pRF matrixs which shape is (size, size, size*size*fwhm#)
"""
fwhm_num = 10
fwhms = np.arange(1, fwhm_num+1)
prfs = np.zeros((size, size, size*size*fwhm_num))
for k in range(fwhm_num):
for i in range(size):
for j in range(size):
idx = k*size*size + i*size + j
prfs[:, :, idx] = make_2d_gaussian(size, fwhm=fwhms[k],
center=(j, i))
return prfs
def sugar_gaussian_f(size, x0, y0, sigma, offset, beta):
"""Sugar function for model fitting."""
g = make_2d_gaussian(size, sigma, center=(y0, x0))
g = offset + beta * g
return g.ravel()
def sugar_dog_f(size, x0, y0, c_sigma, s_sigma, c_beta, s_beta):
"""Sugar function for model fitting."""
g = make_2d_dog(size, c_sigma, s_sigma, c_beta, s_beta, center=(y0, x0))
return g.ravel()
def sugar_log_f(size, x0, y0, sigma, offset, beta):
"""Sugar function for model fitting."""
g = make_2d_log(size, sigma, center=(y0, x0))
g = offset + beta * g
return g.ravel()
| bsd-3-clause |
dendisuhubdy/tensorflow | tensorflow/contrib/training/python/training/feeding_queue_runner_test.py | 76 | 5052 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
nat13ejo/garn | garn/wire_3d.py | 1 | 9734 | import kwant
from math import sqrt
from matplotlib import pyplot
import numpy as np
from garn.geometry import hexagon, extension
from garn.system_wide import Wire
class Wire3D(Wire):
"""Wire3D facilitates the modelling of nanowire contact geometries in
Kwant by actings as a help in constructing a hexagonal nanowire
and attaching customizabel contacts in each end.
"""
def __init__(self, base=3, wire_length=30, lead_length=5,
identifier="unnamed", file_name="", step_length=1,
start_top=True, start_right=True, start_left=True,
start_bottom=False, end_top=True, end_right=True,
end_left=True, end_bottom=False):
"""A Instance of Wire3D describes the properties of a 3D nanowire
.. warning:: If keyword parameter `file_name` is set to
anything else than "" all other parameters are
ignored. It is only to facilitate the use of
parameter `file_name` that `base`, `wire_length`
and, `lead_length` parameters are optional.
Parameters
----------
base : int or float, optional
Width of wire.
wire_length : int or float, optional
Length of complete wire including leads.
lead_length : int or float, optional
            Length of the lead-wire interface in the direction of the wire's length.
Other Parameters
----------------
        identifier : str, optional
Identifies the wire represented in plots and data files produced by garn.
step_length : int or float, optional
Discretization step.
        start_top : bool, optional
            Whether there should be a lead on the top at the start of
            the wire.
        start_right : bool, optional
            Whether there should be a lead on the right side at the
            start of the wire.
        start_left : bool, optional
            Whether there should be a lead on the left side at the
            start of the wire.
        start_bottom : bool, optional
            Whether there should be a lead on the bottom at the start
            of the wire.
        end_top : bool, optional
            Whether there should be a lead on the top at the end of
            the wire.
        end_right : bool, optional
            Whether there should be a lead on the right side at the
            end of the wire.
        end_left : bool, optional
            Whether there should be a lead on the left side at the
            end of the wire.
        end_bottom : bool, optional
            Whether there should be a lead on the bottom at the end
            of the wire.
        file_name : str, optional
            Uses the data file specified by this string to create the
            instance.
"""
Wire.__init__(self, base=base, wire_length=wire_length,
lead_length=lead_length, identifier=identifier,
file_name=file_name, step_length=step_length,
start_top=start_top, start_right=start_right,
start_left=start_left,
start_bottom=start_bottom, end_top=end_top,
end_right=end_right, end_left=end_left,
end_bottom=end_bottom)
self.lattice = self._lattice()
self._make_system()
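        # Usage sketch (assumed typical call, not part of the original file):
        #   wire = Wire3D(base=3, wire_length=30, lead_length=5,
        #                 identifier="example-wire")
        # builds the hexagonal scattering region and attaches the leads chosen
        # by the start_*/end_* flags through the _make_system() call above.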
#---------------------------------------------------------------------
# Internal functions
#---------------------------------------------------------------------
def _attach_leads(self, lead_start_top, lead_start_side, lead_end_top,
lead_end_side):
"""Attaches leads to system according to the self.leads list
Parameters
----------
        lead_start_top : Builder_ with 1D translational symmetry in z-direction
            Builder of the lead to be attached on top at the start of
            the wire.
        lead_start_side : Builder_ with 1D translational symmetry in x-direction
            Builder of the lead to be attached on the side at the start
            of the wire.
        lead_end_top : Builder_ with 1D translational symmetry in z-direction
            Builder of the lead to be attached on top at the end of
            the wire.
        lead_end_side : Builder_ with 1D translational symmetry in x-direction
            Builder of the lead to be attached on the side at the end
            of the wire.
        .. _Builder: http://kwant-project.org/doc/1.0/reference/generated/kwant.builder.Builder#kwant.builder.Builder
"""
if self.leads[0]:
self.sys.attach_lead(lead_start_top)
if self.leads[1]:
self.sys.attach_lead(lead_start_side)
if self.leads[2]:
self.sys.attach_lead(lead_start_side.reversed())
if self.leads[3]:
self.sys.attach_lead(lead_start_top.reversed())
if self.leads[4]:
self.sys.attach_lead(lead_end_top)
if self.leads[5]:
self.sys.attach_lead(lead_end_side)
if self.leads[6]:
self.sys.attach_lead(lead_end_side.reversed())
if self.leads[7]:
self.sys.attach_lead(lead_end_top.reversed())
def _make_system(self):
"""Fills the Builder object with sites and hoppings.
        This is where the sites in the scattering region are added to
the kwant.Builder object and functions to create leads and
attach them are called. Welcome to the heart of
:class:`garn.Wire3D`.
"""
#add sites in scattering region
self.sys[self.lattice.shape(
self._hexagon_wire, (0, 0, 0))] = self._onsite
self.sys[self.lattice.neighbors()] = - self.t
lead_start_top, lead_end_top = self._create_leads((0, 0, self.a))
lead_start_side, lead_end_side = self._create_leads((self.a, 0, 0))
self._attach_leads(lead_start_top, lead_start_side,
lead_end_top, lead_end_side)
#self.system_plot()
self.sys = self.sys.finalized()
def _hexagon_wire(self, pos):
""" Find out if the position is inside a hexagonal wire."""
x, y, z = pos
if (hexagon((x, z), self.base) & (y >= 0) & (y < self.wire_length)):
return True
else:
return False
def _positions_of_leads(self):
"""Calculate positions from where to start fill leads
Returns
-------
start_top_site: tuple of 3 floats
Top left corner of rectange enclosing the hexagon of the
beggining of the wire.
end_top_site: tuple of 3 floats
Top left corner of rectange enclosing the hexagon of the
wire at a the begging of the lead at the end of the wire.
Notes
-----
Explaining these positions are messy so here is
some math instead.
.. math::
start_top_site = (-\dfrac{base}{2}, 0, \dfrac{\sqrt{3}base}{2}) \
start_end_site = (-\dfrac{base}\2}, wire_length - lead_length, \dfrac{\sqrt{3}base}{2})
"""
xs, ys, zs = self.lattice.closest(( - self.base / 2.0, 0,
sqrt(3) / 2.0 *
self.base))
xe, ye, ze = self.lattice.closest( (- self.base / 2.0,
self.wire_length -
self.lead_length, sqrt(3)
/ 2.0 * self.base))
start_top_site = (xs, ys, zs)
end_top_site = (xe, ye, ze)
return start_top_site, end_top_site
def _lattice(self):
# Set lattice vectors for lattice object
basis_vectors = ((self.a, 0, 0), (0, self.a, 0), (0, 0, self.a))
# return the lattice object
return kwant.lattice.Monatomic(basis_vectors)
def _onsite(self, args):
# + kwant.digest.gauss(str(site.pos))
return 6 * self.t
def _fill_lead(self, lead, position, side=False):
x, y, z = position
start_x = -self.base + 1
if not side:
lead[[self.lattice(i, j, 0) for i in range(-self.base+1,
self.base) for j in range(y, y +
self.lead_length)]] = 6 * self.t
return lead
if side:
lead[[self.lattice(0, j, k) for j in
range(y, y + self.lead_length) for k in
range(int(-self.base * sqrt(3) / 2.0),
int(self.base * sqrt(3) / 2.0)+1)]] = 6 * self.t
return lead
def _create_leads(self, sym):
""" Return lead at the start and end of wire with symetry sym"""
if (sym == (self.a, 0, 0)):
side = True
else:
side = False
lead_start = kwant.Builder(
kwant.TranslationalSymmetry(sym))
lead_end = kwant.Builder(
kwant.TranslationalSymmetry(sym))
pos_start, pos_end = self._positions_of_leads()
lead_end = self._fill_lead(lead_end, pos_end, side)
lead_start = self._fill_lead(lead_start, pos_start, side)
lead_end[self.lattice.neighbors()] = -self.t
lead_start[self.lattice.neighbors()] = -self.t
return lead_start, lead_end
| mit |
dsquareindia/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 34 | 10313 | import numpy as np
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import fowlkes_mallows_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.utils.testing import (
assert_equal, assert_almost_equal, assert_raise_message,
)
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).randint
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k, size=n_samples)
labels_b = random_labels(low=0, high=k, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# with provided sparse contingency
C = contingency_matrix(labels_a, labels_b, sparse=True)
mi = mutual_info_score(labels_a, labels_b, contingency=C)
assert_almost_equal(mi, 0.41022, 5)
# with provided dense contingency
C = contingency_matrix(labels_a, labels_b)
mi = mutual_info_score(labels_a, labels_b, contingency=C)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
n_samples = C.sum()
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_expected_mutual_info_overflow():
# Test for regression where contingency cell exceeds 2**16
# leading to overflow in np.outer, resulting in EMI > 1
assert expected_mutual_information(np.array([[70000]]), 70000) <= 1
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_contingency_matrix_sparse():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C_sparse = contingency_matrix(labels_a, labels_b, sparse=True).toarray()
assert_array_almost_equal(C, C_sparse)
C_sparse = assert_raise_message(ValueError,
"Cannot set 'eps' when sparse=True",
contingency_matrix, labels_a, labels_b,
eps=1e-10, sparse=True)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = (np.ones(i, dtype=np.int),
np.arange(i, dtype=np.int))
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = (random_state.randint(0, 10, i),
random_state.randint(0, 10, i))
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
def test_fowlkes_mallows_score():
# General case
score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 2, 2])
assert_almost_equal(score, 4. / np.sqrt(12. * 6.))
# Perfect match but where the label names changed
perfect_score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0])
assert_almost_equal(perfect_score, 1.)
# Worst case
worst_score = fowlkes_mallows_score([0, 0, 0, 0, 0, 0],
[0, 1, 2, 3, 4, 5])
assert_almost_equal(worst_score, 0.)
def test_fowlkes_mallows_score_properties():
# handcrafted example
labels_a = np.array([0, 0, 0, 1, 1, 2])
labels_b = np.array([1, 1, 2, 2, 0, 0])
expected = 1. / np.sqrt((1. + 3.) * (1. + 2.))
# FMI = TP / sqrt((TP + FP) * (TP + FN))
score_original = fowlkes_mallows_score(labels_a, labels_b)
assert_almost_equal(score_original, expected)
    # symmetric property
    score_symmetric = fowlkes_mallows_score(labels_b, labels_a)
    assert_almost_equal(score_symmetric, expected)
# permutation property
score_permuted = fowlkes_mallows_score((labels_a + 1) % 3, labels_b)
assert_almost_equal(score_permuted, expected)
    # symmetric and permutation (both together)
score_both = fowlkes_mallows_score(labels_b, (labels_a + 2) % 3)
assert_almost_equal(score_both, expected)
| bsd-3-clause |
jblupus/PyLoyaltyProject | loyalty/loyalty.py | 1 | 15648 | #!/usr/bin/python
# coding=utf-8
import json
import os
from threading import Thread
import numpy as np
import pandas as pd
from utils import HOME
PATH = HOME + '/Dropbox/Twitter/'
DATASETS_PATH = PATH + 'Data/datasets/'
PROFILE_PATH = PATH + 'Data/profile/users_profile_data.csv'
class Entropy(Thread):
def __init__(self, inputfile, outputfile):
super(Entropy, self).__init__()
self.inputfile = inputfile
self.outputfile = outputfile
def run(self):
with open(self.inputfile, 'r') as infile, open(self.outputfile, 'wb') as outfile:
entropies = {}
for line in infile.readlines():
line_json = json.loads(line)
key = line_json.keys()[0]
if len(line_json[key]) > 10:
entropies.update({key: calc_entropy(line_json[key])})
json.dump({'entropies': entropies}, outfile, indent=4)
def calc_entropy(freq):
rel_freq = np.array(freq) / float(sum(freq))
return -1 * np.sum(rel_freq * np.log10(rel_freq)) / np.log10(np.size(freq))
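# Sanity check (illustrative): a uniform frequency list yields the maximum
# normalized entropy, e.g. calc_entropy([1, 1, 1, 1]) evaluates to 1.0 (up to
# floating point); any skewed distribution gives a value strictly below 1.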
def calc_interval_size(values, size):
return round(np.size(values) / size, 4)
def clean_dict(intervals):
keys = filter(lambda k: intervals[k] == 0, intervals)
for key in keys:
del intervals[key]
return intervals
def save_json(filename, data):
with open(filename, 'wb') as outfile:
json.dump({'intervals': data}, outfile, indent=4)
def calc_alters(values):
size = float(np.size(values))
if size == 0:
return {}
values_mean = np.mean(values)
values_sd = np.std(values)
intervals = {0: calc_interval_size(filter(lambda v: v <= values_mean, values), size)}
if values_sd > 0:
max_value = np.max(values)
next_values = map(lambda v: v, values)
for i in xrange(0, int(np.ceil((max_value - values_mean) / values_sd))):
inf = values_mean + (i * values_sd)
next_values = filter(lambda v: v > inf, next_values)
sup = values_mean + ((i + 1) * values_sd)
rel_size = calc_interval_size(filter(lambda v: v <= sup, next_values), size)
intervals.update({i + 1: rel_size})
return clean_dict(intervals)
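# Hedged illustration of calc_alters (hypothetical counts, not project data): with
# values = [1, 1, 1, 1, 6] the mean is 2 and the std is 2, so 80% of the alters sit
# at or below the mean (interval 0) and the remaining alter falls two standard
# deviations above it (interval 2); the empty interval 1 is dropped by clean_dict:
# >>> calc_alters([1, 1, 1, 1, 6])
# {0: 0.8, 2: 0.2}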
class Loyalty(Thread):
def __init__(self, in_filename, out_filename):
super(Loyalty, self).__init__()
self.in_filename = in_filename
self.out_filename = out_filename
def run(self):
with open(self.in_filename, 'r') as infile:
loyalties = {}
for line in infile.readlines():
line_json = json.loads(line)
key = line_json.keys()[0]
loyalties.update({key: calc_alters(line_json[key])})
save_json(self.out_filename, data=loyalties)
class TopAlters(Thread):
def __init__(self, in_filename, out_filename, ids=[]):
super(TopAlters, self).__init__()
self.in_filename = in_filename
self.out_filename = out_filename
self.ids = ids
def run(self):
with open(self.in_filename, 'r') as infile:
top_alters = {}
json_values = json.load(infile)['intervals']
has_ids = np.size(self.ids) == 0
for json_value in json_values:
if has_ids or int(json_value) in self.ids:
if len(json_values[json_value]) > 0:
try:
max_interval = np.max(np.array(json_values[json_value].keys()).astype(int))
except ValueError as v:
print json_value, len(json_values[json_value]), json_values[json_value]
raise v
top_alters.update({json_value: max_interval})
df = pd.DataFrame()
df['ego_id'] = top_alters.keys()
df['max_interval'] = top_alters.values()
df.to_csv(self.out_filename)
def text_loyalty():
for filename in ['like.jsons',
'mention.jsons',
'retweet.jsons']:
in_filename = PATH + 'Text.Distributions/' + filename
        out_filename = PATH + 'Text.Loyalty/' + filename
Loyalty(in_filename=in_filename, out_filename=out_filename).run()
def interactions_loyalty():
for filename in ['like.jsons',
'mention.jsons',
'retweet.jsons',
'union.jsons']:
in_filename = PATH + 'Filtered.Distributions/' + filename
out_filename = PATH + 'Interactions.Loyalty/' + filename
Loyalty(in_filename=in_filename, out_filename=out_filename).run()
def check(filename):
with open(filename, 'r') as infile:
json_values = json.load(infile)['intervals']
for json_value in json_values:
print json_value, np.sum(json_values[json_value].values())
def top_alters():
for filename in [('like.jsons', 'like.csv'),
('mention.jsons', 'mention.csv'),
('retweet.jsons', 'retweet.csv'),
('union.jsons', 'union.csv')]:
in_filename = PATH + 'Interactions.Loyalty/' + filename[0]
out_filename = PATH + 'Interactions.TopAlters/' + filename[1]
TopAlters(in_filename=in_filename, out_filename=out_filename).run()
def friend_top_alters():
# for filename in [('like.jsons', 'd_friend_like.csv'),
# ('mention.jsons', 'd_friend_mention.csv'),
# ('retweet.jsons', 'd_friend_retweet.csv')]:
# df_friends = pd.read_csv(FRIENDS_PATH + filename[1])
# friends = df_friends['seed_id'].values
# in_filename = PATH + 'Interactions.Loyalty/' + filename[0]
# out_filename = PATH + 'Interactions.TopAlters/' + filename[1]
# TopAlters(in_filename=in_filename, out_filename=out_filename, friends=friends).run()
union_friends = set()
for filename in ['d_friend_like.csv', 'd_friend_mention.csv', 'd_friend_retweet.csv']:
df_friends = pd.read_csv(DATASETS_PATH + filename)
friends = df_friends['seed_id'].values
union_friends.update(friends)
union_friends = list(union_friends)
in_filename = PATH + 'Interactions.Loyalty/union.jsons'
out_filename = PATH + 'Interactions.TopAlters/d_friend_union.csv'
TopAlters(in_filename=in_filename, out_filename=out_filename, ids=union_friends).run()
# friend_top_alters()
def language_top_atlers():
df = pd.read_csv(PROFILE_PATH)
langs = ['en', 'es', 'pt', 'fr', 'it', 'de', 'ja', 'others']
df['language'] = map(lambda lang: lang if 'en-' not in lang else 'en', df['language'])
df['language'] = map(lambda lang: lang if 'es-' not in lang else 'es', df['language'])
df['language'] = map(lambda lang: lang if 'pt-' not in lang else 'pt', df['language'])
df['language'] = map(lambda lang: lang if 'fr-' not in lang else 'fr', df['language'])
df['language'] = map(lambda lang: lang if 'it-' not in lang else 'it', df['language'])
df['language'] = map(lambda lang: lang if 'de-' not in lang else 'de', df['language'])
df['language'] = map(lambda lang: lang if 'ja-' not in lang else 'ja', df['language'])
df['language'] = map(lambda lang: lang if lang in langs else 'others', df['language'])
for language in langs:
lang_ids = df.loc[df['language'] == language]['user_id'].values
for filename in [('like.jsons', 'd_like_' + language + '.csv'),
('mention.jsons', 'd_mention_' + language + '.csv'),
('retweet.jsons', 'd_retweet_' + language + '.csv'),
('union.jsons', 'd_union_' + language + '.csv')]:
in_filename = PATH + 'Interactions.Loyalty/' + filename[0]
out_filename = PATH + 'Language.TopAlters/' + filename[1]
TopAlters(in_filename=in_filename, out_filename=out_filename, ids=lang_ids).run()
# friend_top_alters()
def datasets_size():
d_size = 188511.0
print '\n\nDatasets'
for dataset in np.sort(filter(lambda x: 'union' not in x, os.listdir(DATASETS_PATH))):
df = pd.read_csv(DATASETS_PATH + dataset)
size = np.size(df['seed_id'].values)
print dataset.split('.')[0], ' & '.join([str(size), str(round(100 * size / d_size, 2))])
print '\n\nLanguages Datasets'
for dataset in np.sort(filter(lambda x: 'union' not in x, os.listdir(PATH + 'Language.TopAlters/'))):
df = pd.read_csv(PATH + 'Language.TopAlters/' + dataset)
size = np.size(df['ego_id'].values)
print dataset.split('.')[0], ' & '.join([str(size), str(round(100 * size / d_size, 2))])
# datasets_size()
def get_top_alters(filename=None, values=[], ids=[]):
if filename is not None:
df = pd.read_csv(filename)
if len(ids) == 0:
values = df['max_interval'].values
else:
d = dict(zip(df['ego_id'], df['max_interval']))
values = [d[key] for key in ids]
values = filter(lambda x: x > 0, values)
total = float(np.size(values))
i0 = np.size(filter(lambda x: x == 0, values))
i1 = np.size(filter(lambda x: 1 <= x <= 3, values))
i2 = np.size(filter(lambda x: 4 <= x <= 6, values))
i3 = np.size(filter(lambda x: 7 <= x <= 9, values))
i4 = np.size(filter(lambda x: 10 <= x <= 12, values))
i5 = np.size(filter(lambda x: 13 <= x <= 15, values))
i6 = np.size(filter(lambda x: 16 <= x, values))
counts = {0: i0, 1: i1, 2: i2, 3: i3, 4: i4, 5: i5, 6: i6}
# counts = dict([(key, round(100 * counts[key] / total, 4)) for key in np.sort(counts.keys())])
return total, np.array(counts.values()).astype(float).tolist()
def check_top(filename):
with open(filename, 'r') as infile:
i = []
for line in infile.readlines():
values = np.array(line.split(' '))[1:]
i.append(float(values[0].split(':')[0]))
total, counts = get_top_alters(values=i)
print total, 100.0 * np.array(counts) / total, counts
def top_category():
langs = ['en', 'es', 'pt', 'fr', 'it', 'de', 'ja', 'others']
datapath = PATH + 'Interactions.TopAlters/'
langdatapath = PATH + 'Language.TopAlters/'
total, values = get_top_alters(datapath + 'like.csv')
print 'like.csv', total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
for lang in langs:
total, values = get_top_alters(langdatapath + 'd_like_' + lang + '.csv')
print 'd_like_' + lang + '.csv', total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4))
.astype(str)) + '\\\\'
print '\n'
total, values = get_top_alters(datapath + 'mention.csv')
print 'mention.csv', total, ' & '.join(
np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
for lang in langs:
total, values = get_top_alters(langdatapath + 'd_mention_' + lang + '.csv')
print 'd_mention_' + lang + '.csv', total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4))
.astype(str)) + '\\\\'
print '\n'
total, values = get_top_alters(datapath + 'retweet.csv')
print 'retweet.csv', total, ' & '.join(
np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
for lang in langs:
total, values = get_top_alters(langdatapath + 'd_retweet_' + lang + '.csv')
print 'd_retweet_' + lang + '.csv', total, ' & '.join(
np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
print '\n'
total, values = get_top_alters(datapath + 'union.csv')
print 'union.csv', total, ' & '.join(
np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
for lang in langs:
total, values = get_top_alters(langdatapath + 'd_union_' + lang + '.csv')
print 'd_union_' + lang + '.csv', total, ' & '.join(
np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
# top_category()
def co_top_category():
ids = set()
for _file in ['d_like.csv', 'd_mention.csv', 'd_retweet.csv']:
_ids = np.array(pd.read_csv(DATASETS_PATH + _file)['seed_id']).astype(int)
if len(ids) > 0:
ids = ids.intersection(_ids.tolist())
else:
ids.update(_ids.tolist())
datapath = PATH + 'Interactions.TopAlters/'
total, values = get_top_alters(filename=datapath + 'like.csv', ids=list(ids))
print 'like.csv', total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
total, values = get_top_alters(filename=datapath + 'mention.csv', ids=list(ids))
print 'mention.csv', total, ' & '.join(
np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
total, values = get_top_alters(filename=datapath + 'retweet.csv', ids=list(ids))
print 'retweet.csv', total, ' & '.join(
np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
total, values = get_top_alters(filename=datapath + 'union.csv', ids=list(ids))
print 'union.csv', total, ' & '.join(
np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
# co_top_category()
def friends_top_category():
for _file in [('d_like.csv', 'd_friend_like.csv'),
('d_mention.csv', 'd_friend_mention.csv'),
('d_retweet.csv', 'd_friend_retweet.csv'),
('d_union.csv', 'd_friend_union.csv')]:
# _ids = np.array(pd.read_csv(DATASETS_PATH + _file[1])['seed_id']).astype(int)
datapath = PATH + 'Interactions.TopAlters/'
total, values = get_top_alters(filename=datapath + _file[1])
print _file[0], total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
# friends_top_category()
def co_friends_top_category():
ids = set()
for _file in ['d_friend_like.csv',
'd_friend_mention.csv',
'd_friend_retweet.csv']:
_ids = np.array(pd.read_csv(DATASETS_PATH + _file)['seed_id']).astype(int)
if len(ids) > 0:
ids = ids.intersection(_ids.tolist())
else:
ids.update(_ids.tolist())
for _file in [('d_like.csv', 'd_friend_like.csv'),
('d_mention.csv', 'd_friend_mention.csv'),
('d_retweet.csv', 'd_friend_retweet.csv'),
('d_union.csv', 'd_friend_union.csv')]:
datapath = PATH + 'Interactions.TopAlters/'
total, values = get_top_alters(filename=datapath + _file[1], ids=list(ids))
print _file[0], total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
# co_friends_top_category()
def text_top_alters():
for filename in [('like.jsons', 'like.csv'),
('mention.jsons', 'mention.csv'),
('retweet.jsons', 'retweet.csv')]:
in_filename = PATH + 'Text.Loyalty/' + filename[0]
out_filename = PATH + 'Text.TopAlters/' + filename[1]
TopAlters(in_filename=in_filename, out_filename=out_filename).run()
# text_top_alters()
def text_top_category():
for _file in ['like.csv',
'mention.csv',
'retweet.csv']:
datapath = PATH + 'Text.TopAlters/'
total, values = get_top_alters(filename=datapath + _file)
print _file, total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
| bsd-2-clause |
abhishekgahlot/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
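# In other words the affinity is w_ij = exp(-g_ij / std(g)), where g_ij is the
# gradient magnitude between neighbouring pixels i and j: flat regions keep weights
# close to 1 while strong edges are down-weighted towards 0.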
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
cysuncn/python | study/machinelearning/tensorflow/faceSensor/PR/face_train_use_keras.py | 1 | 10408 | #-*- coding: utf-8 -*-
import random
import numpy as np
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.models import load_model
from keras import backend as K
from load_face_dataset import load_dataset, resize_image, IMAGE_SIZE
class Dataset:
def __init__(self, path_name):
        # training set
        self.train_images = None
        self.train_labels = None
        # validation set
        self.valid_images = None
        self.valid_labels = None
        # test set
        self.test_images = None
        self.test_labels = None
        # path the dataset is loaded from
        self.path_name = path_name
        # dimension ordering used by the current backend
        self.input_shape = None
    # load the dataset, split it following cross-validation practice and run the related preprocessing
def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE,
img_channels=3, nb_classes=2):
        # load the dataset into memory
images, labels = load_dataset(self.path_name)
train_images, valid_images, train_labels, valid_labels = train_test_split(images, labels, test_size=0.3,
random_state=random.randint(0, 100))
_, test_images, _, test_labels = train_test_split(images, labels, test_size=0.5,
random_state=random.randint(0, 100))
        # if the current dim ordering is 'th', image data is fed as channels, rows, cols; otherwise rows, cols, channels
        # this block reshapes the training data into the dimension ordering required by keras
if K.image_dim_ordering() == 'th':
train_images = train_images.reshape(train_images.shape[0], img_channels, img_rows, img_cols)
valid_images = valid_images.reshape(valid_images.shape[0], img_channels, img_rows, img_cols)
test_images = test_images.reshape(test_images.shape[0], img_channels, img_rows, img_cols)
self.input_shape = (img_channels, img_rows, img_cols)
else:
train_images = train_images.reshape(train_images.shape[0], img_rows, img_cols, img_channels)
valid_images = valid_images.reshape(valid_images.shape[0], img_rows, img_cols, img_channels)
test_images = test_images.reshape(test_images.shape[0], img_rows, img_cols, img_channels)
self.input_shape = (img_rows, img_cols, img_channels)
        # print the sizes of the training, validation and test sets
        print(train_images.shape[0], 'train samples')
        print(valid_images.shape[0], 'valid samples')
        print(test_images.shape[0], 'test samples')
        # the model uses categorical_crossentropy as its loss function, so the class labels
        # must be one-hot encoded according to nb_classes; with only two classes the labels become 2-D
        train_labels = np_utils.to_categorical(train_labels, nb_classes)
        valid_labels = np_utils.to_categorical(valid_labels, nb_classes)
        test_labels = np_utils.to_categorical(test_labels, nb_classes)
        # convert pixel data to float so it can be normalised
        train_images = train_images.astype('float32')
        valid_images = valid_images.astype('float32')
        test_images = test_images.astype('float32')
        # normalise the pixel values into the 0~1 range
train_images /= 255
valid_images /= 255
test_images /= 255
self.train_images = train_images
self.valid_images = valid_images
self.test_images = test_images
self.train_labels = train_labels
self.valid_labels = valid_labels
self.test_labels = test_labels
# CNN network model class
class Model:
def __init__(self):
self.model = None
def build_model(self, dataset, nb_classes=2):
        # build an empty network model: a linear stack to which layers are added in order,
        # formally known as a Sequential (linearly stacked) model
        self.model = Sequential()
        # the following calls add, in order, every layer the CNN needs; each add() is one layer
        self.model.add(Convolution2D(32, 3, 3, border_mode='same',
                                     input_shape=dataset.input_shape))  # 1  2-D convolution layer
        self.model.add(Activation('relu'))  # 2  activation layer
        self.model.add(Convolution2D(32, 3, 3))  # 3  2-D convolution layer
        self.model.add(Activation('relu'))  # 4  activation layer
        self.model.add(MaxPooling2D(pool_size=(2, 2)))  # 5  pooling layer
        self.model.add(Dropout(0.25))  # 6  Dropout layer
        self.model.add(Convolution2D(64, 3, 3, border_mode='same'))  # 7  2-D convolution layer
        self.model.add(Activation('relu'))  # 8  activation layer
        self.model.add(Convolution2D(64, 3, 3))  # 9  2-D convolution layer
        self.model.add(Activation('relu'))  # 10 activation layer
        self.model.add(MaxPooling2D(pool_size=(2, 2)))  # 11 pooling layer
        self.model.add(Dropout(0.25))  # 12 Dropout layer
        self.model.add(Flatten())  # 13 Flatten layer
        self.model.add(Dense(512))  # 14 Dense layer, also known as a fully connected layer
        self.model.add(Activation('relu'))  # 15 activation layer
        self.model.add(Dropout(0.5))  # 16 Dropout layer
        self.model.add(Dense(nb_classes))  # 17 Dense layer
        self.model.add(Activation('softmax'))  # 18 classification layer, produces the final output
        # print a summary of the model
self.model.summary()
def train(self, dataset, batch_size=20, nb_epoch=10, data_augmentation=True):
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)  # train with an SGD + momentum optimizer; build the optimizer object first
        self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])  # perform the actual model configuration
        # without data augmentation: augmentation means creating new training samples from
        # the data we provide (rotations, flips, added noise, ...) to deliberately enlarge the training set
if not data_augmentation:
self.model.fit(dataset.train_images, dataset.train_labels, batch_size=batch_size, nb_epoch=nb_epoch,
validation_data=(dataset.valid_images, dataset.valid_labels), shuffle=True)
        # with real-time data augmentation
else:
            # define the data generator used for augmentation; it returns a generator object,
            # datagen, which yields one batch per call (in order), saving memory -- essentially a python generator
            datagen = ImageDataGenerator(featurewise_center=False,  # whether to centre the input data (dataset mean = 0)
                                         samplewise_center=False,  # whether to set each sample's mean to 0
                                         featurewise_std_normalization=False,  # whether to standardise (divide inputs by the dataset std)
                                         samplewise_std_normalization=False,  # whether to divide each sample by its own std
                                         zca_whitening=False,  # whether to apply ZCA whitening to the input
                                         rotation_range=20,  # random rotation angle for augmented images (range 0~180)
                                         width_shift_range=0.2,  # horizontal shift as a fraction of image width (float in 0~1)
                                         height_shift_range=0.2,  # same as above, but vertical
                                         horizontal_flip=True,  # whether to apply random horizontal flips
                                         vertical_flip=False)  # whether to apply random vertical flips
            # fit the generator on the whole training set, needed for feature-wise normalisation, ZCA whitening, etc.
datagen.fit(dataset.train_images)
            # start training the model using the generator
self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels, batch_size=batch_size),
samples_per_epoch=dataset.train_images.shape[0], nb_epoch=nb_epoch,
validation_data=(dataset.valid_images, dataset.valid_labels))
MODEL_PATH = 'D:\model\me.face.model.h5'
def save_model(self, file_path=MODEL_PATH):
self.model.save(file_path)
def load_model(self, file_path=MODEL_PATH):
self.model = load_model(file_path)
def evaluate(self, dataset):
score = self.model.evaluate(dataset.test_images, dataset.test_labels, verbose=1)
print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))
    # recognise a face
def face_predict(self, image):
        # again determine the dimension ordering from the backend in use
if K.image_dim_ordering() == 'th' and image.shape != (1, 3, IMAGE_SIZE, IMAGE_SIZE):
            image = resize_image(image)  # the size must match the training set: IMAGE_SIZE x IMAGE_SIZE
            image = image.reshape((1, 3, IMAGE_SIZE, IMAGE_SIZE))  # unlike training, prediction here is done on a single image
elif K.image_dim_ordering() == 'tf' and image.shape != (1, IMAGE_SIZE, IMAGE_SIZE, 3):
image = resize_image(image)
image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
        # convert to float and normalise
        image = image.astype('float32')
        image /= 255
        # give the probability of each class; with our binary labels this is the probability of class 0 and class 1
result = self.model.predict_proba(image)
print('result:', result)
        # give the class prediction: 0 or 1
        result = self.model.predict_classes_customerize(image)
        # return the predicted class
return result[0]
if __name__ == '__main__':
dataset = Dataset('D:/data')
dataset.load()
model = Model()
model.build_model(dataset)
model.train(dataset)
model.save_model(file_path='D:\model\me.face.model.h5')
    # evaluate the model
model = Model()
model.load_model(file_path='D:\model\me.face.model.h5')
model.evaluate(dataset)
| gpl-3.0 |
dubourg/openturns | python/test/t_features.py | 1 | 2344 | #! /usr/bin/env python
from __future__ import print_function
import os
width = 40
# check that python can load OpenTURNS module
print('1: Python module load'.ljust(width), end=' ')
try:
import openturns as ot
print('OK')
except:
print('no')
# check that python can find the Viewer module
# If it fails, check that matplotlib package is installed
print('2: Viewer (matplotlib)'.ljust(width), end=' ')
try:
import openturns.viewer
print('OK')
except:
print('no')
# check that OpenTURNS can run R
# It should produce a file named testDraw.png
print('3: drawing (R)'.ljust(width), end=' ')
try:
graph = ot.Normal().drawPDF()
fname = 'testDraw.png'
try:
graph.draw(fname)
os.remove(fname)
except:
raise
print('OK')
except:
print('no')
# check that rot package is installed
print('4: linear model (R.rot)'.ljust(width), end=' ')
try:
lm = ot.LinearModelFactory().build(
ot.Normal(2).getSample(10), ot.Normal().getSample(10))
print('OK')
except:
print('no')
# check XML support
print('5: serialization (LibXML2)'.ljust(width), end=' ')
try:
storageManager = ot.XMLStorageManager('myFile.xml')
print('OK')
except:
print('no')
# check that analytical function are available
print('6: analytical function (muParser)'.ljust(width), end=' ')
try:
f = ot.NumericalMathFunction(['x1', 'x2'], ['y'], ['x1+x2'])
print('OK')
except:
print('no')
# check that hmat library was found
print('7: HMatrix (hmat-oss)'.ljust(width), end=' ')
try:
# This is a little bit tricky because HMat 1.0 fails with 1x1 matrices
ot.ResourceMap.SetAsUnsignedInteger(
'TemporalNormalProcess-SamplingMethod', 1)
vertices = [[0.0, 0.0, 0.0]]
vertices.append([1.0, 0.0, 0.0])
vertices.append([0.0, 1.0, 0.0])
vertices.append([0.0, 0.0, 1.0])
simplices = [[0, 1, 2, 3]]
# Discard messages from HMat
ot.Log.Show(0)
process = ot.TemporalNormalProcess(
ot.ExponentialModel(3), ot.Mesh(vertices, simplices))
f = process.getRealization()
print('OK')
except:
print('no')
# check that nlopt library was found
print('8: optimization (NLopt)'.ljust(width), end=' ')
try:
problem = ot.OptimizationProblem()
algo = ot.SLSQP()
algo.setProblem(problem)
print('OK')
except:
print('no')
| gpl-3.0 |
dbftdiyoeywga/bards | bards/utils.py | 1 | 3353 | import time
import os
import random
import numpy as np
import pandas as pd
import argparse
import mlflow
from pathlib import Path
from contextlib import contextmanager
@contextmanager
def timer(name):
t0 = time.time()
yield
print(f"[{name}] done in {time.time() - t0:.0f} s")
def reduce_mem_usage(df: pd.DataFrame):
"""iterate through all the columns of a dataframe and modify the data type
to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024 ** 2
print("Memory usage of dataframe is {:.2f} MB".format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if (
c_min > np.finfo(np.float16).min
and c_max < np.finfo(np.float16).max
):
df[col] = df[col].astype(np.float16)
elif (
c_min > np.finfo(np.float32).min
and c_max < np.finfo(np.float32).max
):
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype("category")
end_mem = df.memory_usage().sum() / 1024 ** 2
print("Memory usage after optimization is: {:.2f} MB".format(end_mem))
print("Decreased by {:.1f}%".format(100 * (start_mem - end_mem) / start_mem))
return df
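# Hedged usage sketch (illustrative frame, not project data): numeric columns are
# downcast to the narrowest dtype that still holds their min/max, and object columns
# become category.
# >>> frame = pd.DataFrame({"a": np.arange(10, dtype=np.int64), "b": np.linspace(0, 1, 10)})
# >>> frame = reduce_mem_usage(frame)  # 'a' -> int8, 'b' -> float16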
def seed_everything(seed: int):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
def csv2feather(path: str, target=None):
df = pd.read_csv(path)
p = Path(path)
o_path = p.with_suffix(".ftr")
if Path(o_path).exists():
pass
else:
df.to_feather(o_path)
if target is not None:
o_path = p.parent / "target.ftr"
if Path(o_path).exists():
pass
else:
df_ = pd.DataFrame()
df_["target"] = df[target]
df_.to_feather(o_path)
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"--force", "-f", action="store_true", help="Overwrite existing files"
)
return parser.parse_args()
def save_log(score_dict):
mlflow.log_metrics(score_dict)
mlflow.log_artifact("./config/config.yaml")
def load_dataset(features: list):
train = [pd.read_feather(f"./features/{f}_train.ftr") for f in features]
test = [pd.read_feather(f"./features/{f}_test.ftr") for f in features]
return pd.concat(train, axis=1), pd.concat(test, axis=1)
def load_target(feature: str):
    # NOTE: assumed layout -- the target feather written by csv2feather lives under ./data/raw/
    target = pd.read_feather(f"./data/raw/{feature}.ftr")
    return target["target"]
| mit |
rothnic/bokeh | bokeh/charts/builder/tests/test_line_builder.py | 33 | 2376 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Line
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestLine(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Line, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['y_jython'], y_jython)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Line, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
| bsd-3-clause |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tests/test_internals.py | 7 | 47893 | # -*- coding: utf-8 -*-
# pylint: disable=W0102
from datetime import datetime, date
import nose
import numpy as np
import re
import itertools
from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex,
Series, Categorical)
from pandas.compat import OrderedDict, lrange
from pandas.sparse.array import SparseArray
from pandas.core.internals import (BlockPlacement, SingleBlockManager,
make_block, BlockManager)
import pandas.core.algorithms as algos
import pandas.util.testing as tm
import pandas as pd
from pandas import lib
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
randn, assert_series_equal)
from pandas.compat import zip, u
def assert_block_equal(left, right):
tm.assert_numpy_array_equal(left.values, right.values)
assert (left.dtype == right.dtype)
tm.assertIsInstance(left.mgr_locs, lib.BlockPlacement)
tm.assertIsInstance(right.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(left.mgr_locs.as_array,
right.mgr_locs.as_array)
def get_numeric_mat(shape):
arr = np.arange(shape[0])
return np.lib.stride_tricks.as_strided(x=arr, shape=shape, strides=(
arr.itemsize, ) + (0, ) * (len(shape) - 1)).copy()
N = 10
def create_block(typestr, placement, item_shape=None, num_offset=0):
"""
Supported typestr:
* float, f8, f4, f2
* int, i8, i4, i2, i1
* uint, u8, u4, u2, u1
* complex, c16, c8
* bool
* object, string, O
* datetime, dt, M8[ns], M8[ns, tz]
* timedelta, td, m8[ns]
* sparse (SparseArray with fill_value=0.0)
* sparse_na (SparseArray with fill_value=np.nan)
* category, category2
"""
placement = BlockPlacement(placement)
num_items = len(placement)
if item_shape is None:
item_shape = (N, )
shape = (num_items, ) + item_shape
mat = get_numeric_mat(shape)
if typestr in ('float', 'f8', 'f4', 'f2', 'int', 'i8', 'i4', 'i2', 'i1',
'uint', 'u8', 'u4', 'u2', 'u1'):
values = mat.astype(typestr) + num_offset
elif typestr in ('complex', 'c16', 'c8'):
values = 1.j * (mat.astype(typestr) + num_offset)
elif typestr in ('object', 'string', 'O'):
values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],
shape)
elif typestr in ('b', 'bool', ):
values = np.ones(shape, dtype=np.bool_)
elif typestr in ('datetime', 'dt', 'M8[ns]'):
values = (mat * 1e9).astype('M8[ns]')
elif typestr.startswith('M8[ns'):
# datetime with tz
m = re.search(r'M8\[ns,\s*(\w+\/?\w*)\]', typestr)
assert m is not None, "incompatible typestr -> {0}".format(typestr)
tz = m.groups()[0]
assert num_items == 1, "must have only 1 num items for a tz-aware"
values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)
elif typestr in ('timedelta', 'td', 'm8[ns]'):
values = (mat * 1).astype('m8[ns]')
elif typestr in ('category', ):
values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4])
elif typestr in ('category2', ):
values = Categorical(['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'd'
])
elif typestr in ('sparse', 'sparse_na'):
# FIXME: doesn't support num_rows != 10
assert shape[-1] == 10
assert all(s == 1 for s in shape[:-1])
if typestr.endswith('_na'):
fill_value = np.nan
else:
fill_value = 0.0
values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,
4, 5, fill_value, 6], fill_value=fill_value)
arr = values.sp_values.view()
arr += (num_offset - 1)
else:
raise ValueError('Unsupported typestr: "%s"' % typestr)
return make_block(values, placement=placement, ndim=len(shape))
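# Hedged illustration of create_block (default N == 10): a float block occupying
# manager rows 0, 2 and 4.
# >>> blk = create_block('f8', [0, 2, 4])
# >>> blk.shape, blk.dtype
# ((3, 10), dtype('float64'))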
def create_single_mgr(typestr, num_rows=None):
if num_rows is None:
num_rows = N
return SingleBlockManager(
create_block(typestr, placement=slice(0, num_rows), item_shape=()),
np.arange(num_rows))
def create_mgr(descr, item_shape=None):
"""
Construct BlockManager from string description.
String description syntax looks similar to np.matrix initializer. It looks
like this::
a,b,c: f8; d,e,f: i8
Rules are rather simple:
* see list of supported datatypes in `create_block` method
* components are semicolon-separated
* each component is `NAME,NAME,NAME: DTYPE_ID`
* whitespace around colons & semicolons are removed
* components with same DTYPE_ID are combined into single block
* to force multiple blocks with same dtype, use '-SUFFIX'::
'a:f8-1; b:f8-2; c:f8-foobar'
"""
if item_shape is None:
item_shape = (N, )
offset = 0
mgr_items = []
block_placements = OrderedDict()
for d in descr.split(';'):
d = d.strip()
if not len(d):
continue
names, blockstr = d.partition(':')[::2]
blockstr = blockstr.strip()
names = names.strip().split(',')
mgr_items.extend(names)
placement = list(np.arange(len(names)) + offset)
try:
block_placements[blockstr].extend(placement)
except KeyError:
block_placements[blockstr] = placement
offset += len(names)
mgr_items = Index(mgr_items)
blocks = []
num_offset = 0
for blockstr, placement in block_placements.items():
typestr = blockstr.split('-')[0]
blocks.append(create_block(typestr,
placement,
item_shape=item_shape,
num_offset=num_offset, ))
num_offset += len(placement)
return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]),
[mgr_items] + [np.arange(n) for n in item_shape])
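# Hedged illustration of the description syntax above (here with item_shape=(3,)):
# >>> mgr = create_mgr('a,b: f8; c: i8', item_shape=(3,))
# >>> mgr.nblocks, len(mgr), mgr.shape
# (2, 3, (3, 3))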
class TestBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
# self.fblock = get_float_ex() # a,c,e
# self.cblock = get_complex_ex() #
# self.oblock = get_obj_ex()
# self.bool_block = get_bool_ex()
# self.int_block = get_int_ex()
self.fblock = create_block('float', [0, 2, 4])
self.cblock = create_block('complex', [7])
self.oblock = create_block('object', [1, 3])
self.bool_block = create_block('bool', [5])
self.int_block = create_block('int', [6])
def test_constructor(self):
int32block = create_block('i4', [0])
self.assertEqual(int32block.dtype, np.int32)
def test_pickle(self):
def _check(blk):
assert_block_equal(self.round_trip_pickle(blk), blk)
_check(self.fblock)
_check(self.cblock)
_check(self.oblock)
_check(self.bool_block)
def test_mgr_locs(self):
tm.assertIsInstance(self.fblock.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(self.fblock.mgr_locs.as_array,
np.array([0, 2, 4], dtype=np.int64))
def test_attrs(self):
self.assertEqual(self.fblock.shape, self.fblock.values.shape)
self.assertEqual(self.fblock.dtype, self.fblock.values.dtype)
self.assertEqual(len(self.fblock), len(self.fblock.values))
def test_merge(self):
avals = randn(2, 10)
bvals = randn(2, 10)
ref_cols = Index(['e', 'a', 'b', 'd', 'f'])
ablock = make_block(avals, ref_cols.get_indexer(['e', 'b']))
bblock = make_block(bvals, ref_cols.get_indexer(['a', 'd']))
merged = ablock.merge(bblock)
tm.assert_numpy_array_equal(merged.mgr_locs.as_array,
np.array([0, 1, 2, 3], dtype=np.int64))
tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals))
tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals))
# TODO: merge with mixed type?
def test_copy(self):
cop = self.fblock.copy()
self.assertIsNot(cop, self.fblock)
assert_block_equal(self.fblock, cop)
def test_reindex_index(self):
pass
def test_reindex_cast(self):
pass
def test_insert(self):
pass
def test_delete(self):
newb = self.fblock.copy()
newb.delete(0)
tm.assertIsInstance(newb.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([2, 4], dtype=np.int64))
self.assertTrue((newb.values[0] == 1).all())
newb = self.fblock.copy()
newb.delete(1)
tm.assertIsInstance(newb.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([0, 4], dtype=np.int64))
self.assertTrue((newb.values[1] == 2).all())
newb = self.fblock.copy()
newb.delete(2)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([0, 2], dtype=np.int64))
self.assertTrue((newb.values[1] == 1).all())
newb = self.fblock.copy()
self.assertRaises(Exception, newb.delete, 3)
def test_split_block_at(self):
# with dup column support this method was taken out
# GH3679
raise nose.SkipTest("skipping for now")
bs = list(self.fblock.split_block_at('a'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['c', 'e']))
bs = list(self.fblock.split_block_at('c'))
self.assertEqual(len(bs), 2)
self.assertTrue(np.array_equal(bs[0].items, ['a']))
self.assertTrue(np.array_equal(bs[1].items, ['e']))
bs = list(self.fblock.split_block_at('e'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['a', 'c']))
# bblock = get_bool_ex(['f'])
# bs = list(bblock.split_block_at('f'))
# self.assertEqual(len(bs), 0)
class TestDatetimeBlock(tm.TestCase):
_multiprocess_can_split_ = True
def test_try_coerce_arg(self):
block = create_block('datetime', [0])
# coerce None
none_coerced = block._try_coerce_args(block.values, None)[2]
self.assertTrue(pd.Timestamp(none_coerced) is pd.NaT)
        # coerce different types of date objects
vals = (np.datetime64('2010-10-10'), datetime(2010, 10, 10),
date(2010, 10, 10))
for val in vals:
coerced = block._try_coerce_args(block.values, val)[2]
self.assertEqual(np.int64, type(coerced))
self.assertEqual(pd.Timestamp('2010-10-10'), pd.Timestamp(coerced))
class TestBlockManager(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.mgr = create_mgr(
'a: f8; b: object; c: f8; d: object; e: f8;'
'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'
'k: M8[ns, US/Eastern]; l: M8[ns, CET];')
def test_constructor_corner(self):
pass
def test_attrs(self):
mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2')
self.assertEqual(mgr.nblocks, 2)
self.assertEqual(len(mgr), 6)
def test_is_mixed_dtype(self):
self.assertFalse(create_mgr('a,b:f8').is_mixed_type)
self.assertFalse(create_mgr('a:f8-1; b:f8-2').is_mixed_type)
self.assertTrue(create_mgr('a,b:f8; c,d: f4').is_mixed_type)
self.assertTrue(create_mgr('a,b:f8; c,d: object').is_mixed_type)
def test_is_indexed_like(self):
mgr1 = create_mgr('a,b: f8')
mgr2 = create_mgr('a:i8; b:bool')
mgr3 = create_mgr('a,b,c: f8')
self.assertTrue(mgr1._is_indexed_like(mgr1))
self.assertTrue(mgr1._is_indexed_like(mgr2))
self.assertTrue(mgr1._is_indexed_like(mgr3))
self.assertFalse(mgr1._is_indexed_like(mgr1.get_slice(
slice(-1), axis=1)))
def test_duplicate_ref_loc_failure(self):
tmp_mgr = create_mgr('a:bool; a: f8')
axes, blocks = tmp_mgr.axes, tmp_mgr.blocks
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([0])
# test trying to create block manager with overlapping ref locs
self.assertRaises(AssertionError, BlockManager, blocks, axes)
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([1])
mgr = BlockManager(blocks, axes)
mgr.iget(1)
def test_contains(self):
self.assertIn('a', self.mgr)
self.assertNotIn('baz', self.mgr)
def test_pickle(self):
mgr2 = self.round_trip_pickle(self.mgr)
assert_frame_equal(DataFrame(self.mgr), DataFrame(mgr2))
# share ref_items
# self.assertIs(mgr2.blocks[0].ref_items, mgr2.blocks[1].ref_items)
# GH2431
self.assertTrue(hasattr(mgr2, "_is_consolidated"))
self.assertTrue(hasattr(mgr2, "_known_consolidated"))
# reset to False on load
self.assertFalse(mgr2._is_consolidated)
self.assertFalse(mgr2._known_consolidated)
def test_non_unique_pickle(self):
mgr = create_mgr('a,a,a:f8')
mgr2 = self.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
mgr = create_mgr('a: f8; a: i8')
mgr2 = self.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
def test_categorical_block_pickle(self):
mgr = create_mgr('a: category')
mgr2 = self.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
smgr = create_single_mgr('category')
smgr2 = self.round_trip_pickle(smgr)
assert_series_equal(Series(smgr), Series(smgr2))
def test_get_scalar(self):
for item in self.mgr.items:
for i, index in enumerate(self.mgr.axes[1]):
res = self.mgr.get_scalar((item, index))
exp = self.mgr.get(item, fastpath=False)[i]
self.assertEqual(res, exp)
exp = self.mgr.get(item).internal_values()[i]
self.assertEqual(res, exp)
def test_get(self):
cols = Index(list('abc'))
values = np.random.rand(3, 3)
block = make_block(values=values.copy(), placement=np.arange(3))
mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])
assert_almost_equal(mgr.get('a', fastpath=False), values[0])
assert_almost_equal(mgr.get('b', fastpath=False), values[1])
assert_almost_equal(mgr.get('c', fastpath=False), values[2])
assert_almost_equal(mgr.get('a').internal_values(), values[0])
assert_almost_equal(mgr.get('b').internal_values(), values[1])
assert_almost_equal(mgr.get('c').internal_values(), values[2])
def test_set(self):
mgr = create_mgr('a,b,c: int', item_shape=(3, ))
mgr.set('d', np.array(['foo'] * 3))
mgr.set('b', np.array(['bar'] * 3))
tm.assert_numpy_array_equal(mgr.get('a').internal_values(),
np.array([0] * 3))
tm.assert_numpy_array_equal(mgr.get('b').internal_values(),
np.array(['bar'] * 3, dtype=np.object_))
tm.assert_numpy_array_equal(mgr.get('c').internal_values(),
np.array([2] * 3))
tm.assert_numpy_array_equal(mgr.get('d').internal_values(),
np.array(['foo'] * 3, dtype=np.object_))
def test_insert(self):
self.mgr.insert(0, 'inserted', np.arange(N))
self.assertEqual(self.mgr.items[0], 'inserted')
assert_almost_equal(self.mgr.get('inserted'), np.arange(N))
for blk in self.mgr.blocks:
yield self.assertIs, self.mgr.items, blk.ref_items
def test_set_change_dtype(self):
self.mgr.set('baz', np.zeros(N, dtype=bool))
self.mgr.set('baz', np.repeat('foo', N))
self.assertEqual(self.mgr.get('baz').dtype, np.object_)
mgr2 = self.mgr.consolidate()
mgr2.set('baz', np.repeat('foo', N))
self.assertEqual(mgr2.get('baz').dtype, np.object_)
mgr2.set('quux', randn(N).astype(int))
self.assertEqual(mgr2.get('quux').dtype, np.int_)
mgr2.set('quux', randn(N))
self.assertEqual(mgr2.get('quux').dtype, np.float_)
def test_set_change_dtype_slice(self): # GH8850
cols = MultiIndex.from_tuples([('1st', 'a'), ('2nd', 'b'), ('3rd', 'c')
])
df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)
df['2nd'] = df['2nd'] * 2.0
self.assertEqual(sorted(df.blocks.keys()), ['float64', 'int64'])
assert_frame_equal(df.blocks['float64'], DataFrame(
[[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]))
assert_frame_equal(df.blocks['int64'], DataFrame(
[[3], [6]], columns=cols[2:]))
def test_copy(self):
cp = self.mgr.copy(deep=False)
for blk, cp_blk in zip(self.mgr.blocks, cp.blocks):
# view assertion
self.assertTrue(cp_blk.equals(blk))
self.assertTrue(cp_blk.values.base is blk.values.base)
cp = self.mgr.copy(deep=True)
for blk, cp_blk in zip(self.mgr.blocks, cp.blocks):
            # copy assertion: we either have a None base or, for some blocks
            # (e.g. datetimetz), an array base, but it was copied
self.assertTrue(cp_blk.equals(blk))
if cp_blk.values.base is not None and blk.values.base is not None:
self.assertFalse(cp_blk.values.base is blk.values.base)
else:
self.assertTrue(cp_blk.values.base is None and blk.values.base
is None)
def test_sparse(self):
mgr = create_mgr('a: sparse-1; b: sparse-2')
# what to test here?
self.assertEqual(mgr.as_matrix().dtype, np.float64)
def test_sparse_mixed(self):
mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8')
self.assertEqual(len(mgr.blocks), 3)
self.assertIsInstance(mgr, BlockManager)
# what to test here?
def test_as_matrix_float(self):
mgr = create_mgr('c: f4; d: f2; e: f8')
self.assertEqual(mgr.as_matrix().dtype, np.float64)
mgr = create_mgr('c: f4; d: f2')
self.assertEqual(mgr.as_matrix().dtype, np.float32)
def test_as_matrix_int_bool(self):
mgr = create_mgr('a: bool-1; b: bool-2')
self.assertEqual(mgr.as_matrix().dtype, np.bool_)
mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1')
self.assertEqual(mgr.as_matrix().dtype, np.int64)
mgr = create_mgr('c: i4; d: i2; e: u1')
self.assertEqual(mgr.as_matrix().dtype, np.int32)
def test_as_matrix_datetime(self):
mgr = create_mgr('h: datetime-1; g: datetime-2')
self.assertEqual(mgr.as_matrix().dtype, 'M8[ns]')
def test_as_matrix_datetime_tz(self):
mgr = create_mgr('h: M8[ns, US/Eastern]; g: M8[ns, CET]')
self.assertEqual(mgr.get('h').dtype, 'datetime64[ns, US/Eastern]')
self.assertEqual(mgr.get('g').dtype, 'datetime64[ns, CET]')
self.assertEqual(mgr.as_matrix().dtype, 'object')
def test_astype(self):
# coerce all
mgr = create_mgr('c: f4; d: f2; e: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t)
self.assertEqual(tmgr.get('c').dtype.type, t)
self.assertEqual(tmgr.get('d').dtype.type, t)
self.assertEqual(tmgr.get('e').dtype.type, t)
# mixed
mgr = create_mgr('a,b: object; c: bool; d: datetime;'
'e: f4; f: f2; g: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t, raise_on_error=False)
self.assertEqual(tmgr.get('c').dtype.type, t)
self.assertEqual(tmgr.get('e').dtype.type, t)
self.assertEqual(tmgr.get('f').dtype.type, t)
self.assertEqual(tmgr.get('g').dtype.type, t)
self.assertEqual(tmgr.get('a').dtype.type, np.object_)
self.assertEqual(tmgr.get('b').dtype.type, np.object_)
if t != np.int64:
self.assertEqual(tmgr.get('d').dtype.type, np.datetime64)
else:
self.assertEqual(tmgr.get('d').dtype.type, t)
def test_convert(self):
def _compare(old_mgr, new_mgr):
""" compare the blocks, numeric compare ==, object don't """
old_blocks = set(old_mgr.blocks)
new_blocks = set(new_mgr.blocks)
self.assertEqual(len(old_blocks), len(new_blocks))
# compare non-numeric
for b in old_blocks:
found = False
for nb in new_blocks:
if (b.values == nb.values).all():
found = True
break
self.assertTrue(found)
for b in new_blocks:
found = False
for ob in old_blocks:
if (b.values == ob.values).all():
found = True
break
self.assertTrue(found)
# noops
mgr = create_mgr('f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
mgr = create_mgr('a, b: object; f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
# convert
mgr = create_mgr('a,b,foo: object; f: i8; g: f8')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
self.assertEqual(new_mgr.get('a').dtype, np.int64)
self.assertEqual(new_mgr.get('b').dtype, np.float64)
self.assertEqual(new_mgr.get('foo').dtype, np.object_)
self.assertEqual(new_mgr.get('f').dtype, np.int64)
self.assertEqual(new_mgr.get('g').dtype, np.float64)
mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;'
'i: i8; g: f8; h: f2')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
self.assertEqual(new_mgr.get('a').dtype, np.int64)
self.assertEqual(new_mgr.get('b').dtype, np.float64)
self.assertEqual(new_mgr.get('foo').dtype, np.object_)
self.assertEqual(new_mgr.get('f').dtype, np.int32)
self.assertEqual(new_mgr.get('bool').dtype, np.bool_)
self.assertEqual(new_mgr.get('dt').dtype.type, np.datetime64)
self.assertEqual(new_mgr.get('i').dtype, np.int64)
self.assertEqual(new_mgr.get('g').dtype, np.float64)
self.assertEqual(new_mgr.get('h').dtype, np.float16)
def test_interleave(self):
# self
for dtype in ['f8', 'i8', 'object', 'bool', 'complex', 'M8[ns]',
'm8[ns]']:
mgr = create_mgr('a: {0}'.format(dtype))
self.assertEqual(mgr.as_matrix().dtype, dtype)
mgr = create_mgr('a: {0}; b: {0}'.format(dtype))
self.assertEqual(mgr.as_matrix().dtype, dtype)
        # will be converted according to the actual dtype of the underlying
mgr = create_mgr('a: category')
self.assertEqual(mgr.as_matrix().dtype, 'i8')
mgr = create_mgr('a: category; b: category')
self.assertEqual(mgr.as_matrix().dtype, 'i8'),
mgr = create_mgr('a: category; b: category2')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: category2')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: category2; b: category2')
self.assertEqual(mgr.as_matrix().dtype, 'object')
# combinations
mgr = create_mgr('a: f8')
self.assertEqual(mgr.as_matrix().dtype, 'f8')
mgr = create_mgr('a: f8; b: i8')
self.assertEqual(mgr.as_matrix().dtype, 'f8')
mgr = create_mgr('a: f4; b: i8')
self.assertEqual(mgr.as_matrix().dtype, 'f4')
mgr = create_mgr('a: f4; b: i8; d: object')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: bool; b: i8')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: complex')
self.assertEqual(mgr.as_matrix().dtype, 'complex')
mgr = create_mgr('a: f8; b: category')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: M8[ns]; b: category')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: M8[ns]; b: bool')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: M8[ns]; b: i8')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: m8[ns]; b: bool')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: m8[ns]; b: i8')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: M8[ns]; b: m8[ns]')
self.assertEqual(mgr.as_matrix().dtype, 'object')
def test_interleave_non_unique_cols(self):
df = DataFrame([
[pd.Timestamp('20130101'), 3.5],
[pd.Timestamp('20130102'), 4.5]],
columns=['x', 'x'],
index=[1, 2])
df_unique = df.copy()
df_unique.columns = ['x', 'y']
self.assertEqual(df_unique.values.shape, df.values.shape)
tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])
tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])
def test_consolidate(self):
pass
def test_consolidate_ordering_issues(self):
self.mgr.set('f', randn(N))
self.mgr.set('d', randn(N))
self.mgr.set('b', randn(N))
self.mgr.set('g', randn(N))
self.mgr.set('h', randn(N))
# we have datetime/tz blocks in self.mgr
cons = self.mgr.consolidate()
self.assertEqual(cons.nblocks, 4)
cons = self.mgr.consolidate().get_numeric_data()
self.assertEqual(cons.nblocks, 1)
tm.assertIsInstance(cons.blocks[0].mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(cons.blocks[0].mgr_locs.as_array,
np.arange(len(cons.items), dtype=np.int64))
def test_reindex_index(self):
pass
def test_reindex_items(self):
# mgr is not consolidated, f8 & f8-2 blocks
mgr = create_mgr('a: f8; b: i8; c: f8; d: i8; e: f8;'
'f: bool; g: f8-2')
reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0)
self.assertEqual(reindexed.nblocks, 2)
tm.assert_index_equal(reindexed.items, pd.Index(['g', 'c', 'a', 'd']))
assert_almost_equal(
mgr.get('g', fastpath=False), reindexed.get('g', fastpath=False))
assert_almost_equal(
mgr.get('c', fastpath=False), reindexed.get('c', fastpath=False))
assert_almost_equal(
mgr.get('a', fastpath=False), reindexed.get('a', fastpath=False))
assert_almost_equal(
mgr.get('d', fastpath=False), reindexed.get('d', fastpath=False))
assert_almost_equal(
mgr.get('g').internal_values(),
reindexed.get('g').internal_values())
assert_almost_equal(
mgr.get('c').internal_values(),
reindexed.get('c').internal_values())
assert_almost_equal(
mgr.get('a').internal_values(),
reindexed.get('a').internal_values())
assert_almost_equal(
mgr.get('d').internal_values(),
reindexed.get('d').internal_values())
def test_multiindex_xs(self):
mgr = create_mgr('a,b,c: f8; d,e,f: i8')
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
mgr.set_axis(1, index)
result = mgr.xs('bar', axis=1)
self.assertEqual(result.shape, (6, 2))
self.assertEqual(result.axes[1][0], ('bar', 'one'))
self.assertEqual(result.axes[1][1], ('bar', 'two'))
def test_get_numeric_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
'str: object; bool: bool; obj: object; dt: datetime',
item_shape=(3, ))
mgr.set('obj', np.array([1, 2, 3], dtype=np.object_))
numeric = mgr.get_numeric_data()
tm.assert_index_equal(numeric.items,
pd.Index(['int', 'float', 'complex', 'bool']))
assert_almost_equal(
mgr.get('float', fastpath=False), numeric.get('float',
fastpath=False))
assert_almost_equal(
mgr.get('float').internal_values(),
numeric.get('float').internal_values())
# Check sharing
numeric.set('float', np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float').internal_values(), np.array([100., 200., 300.]))
numeric2 = mgr.get_numeric_data(copy=True)
tm.assert_index_equal(numeric.items,
pd.Index(['int', 'float', 'complex', 'bool']))
numeric2.set('float', np.array([1000., 2000., 3000.]))
assert_almost_equal(
mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float').internal_values(), np.array([100., 200., 300.]))
def test_get_bool_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
'str: object; bool: bool; obj: object; dt: datetime',
item_shape=(3, ))
mgr.set('obj', np.array([True, False, True], dtype=np.object_))
bools = mgr.get_bool_data()
tm.assert_index_equal(bools.items, pd.Index(['bool']))
assert_almost_equal(mgr.get('bool', fastpath=False),
bools.get('bool', fastpath=False))
assert_almost_equal(
mgr.get('bool').internal_values(),
bools.get('bool').internal_values())
bools.set('bool', np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
np.array([True, False, True]))
# Check sharing
bools2 = mgr.get_bool_data(copy=True)
bools2.set('bool', np.array([False, True, False]))
tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
np.array([True, False, True]))
def test_unicode_repr_doesnt_raise(self):
repr(create_mgr(u('b,\u05d0: object')))
def test_missing_unicode_key(self):
df = DataFrame({"a": [1]})
try:
df.ix[:, u("\u05d0")] # should not raise UnicodeEncodeError
except KeyError:
pass # this is the expected exception
def test_equals(self):
# unique items
bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
self.assertTrue(bm1.equals(bm2))
bm1 = create_mgr('a,a,a: i8-1; b,b,b: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
self.assertTrue(bm1.equals(bm2))
def test_equals_block_order_different_dtypes(self):
# GH 9330
mgr_strings = [
"a:i8;b:f8", # basic case
"a:i8;b:f8;c:c8;d:b", # many types
"a:i8;e:dt;f:td;g:string", # more types
"a:i8;b:category;c:category2;d:category2", # categories
"c:sparse;d:sparse_na;b:f8", # sparse
]
for mgr_string in mgr_strings:
bm = create_mgr(mgr_string)
block_perms = itertools.permutations(bm.blocks)
for bm_perm in block_perms:
bm_this = BlockManager(bm_perm, bm.axes)
self.assertTrue(bm.equals(bm_this))
self.assertTrue(bm_this.equals(bm))
def test_single_mgr_ctor(self):
mgr = create_single_mgr('f8', num_rows=5)
self.assertEqual(mgr.as_matrix().tolist(), [0., 1., 2., 3., 4.])
class TestIndexing(object):
# Nosetests-style data-driven tests.
#
# This test applies different indexing routines to block managers and
# compares the outcome to the result of same operations on np.ndarray.
#
# NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests
# and are disabled.
MANAGERS = [
create_single_mgr('f8', N),
create_single_mgr('i8', N),
# create_single_mgr('sparse', N),
create_single_mgr('sparse_na', N),
# 2-dim
create_mgr('a,b,c,d,e,f: f8', item_shape=(N,)),
create_mgr('a,b,c,d,e,f: i8', item_shape=(N,)),
create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N,)),
create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N,)),
# create_mgr('a: sparse', item_shape=(N,)),
create_mgr('a: sparse_na', item_shape=(N,)),
# 3-dim
create_mgr('a,b,c,d,e,f: f8', item_shape=(N, N)),
create_mgr('a,b,c,d,e,f: i8', item_shape=(N, N)),
create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N, N)),
create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N, N)),
# create_mgr('a: sparse', item_shape=(1, N)),
]
# MANAGERS = [MANAGERS[6]]
def test_get_slice(self):
def assert_slice_ok(mgr, axis, slobj):
# import pudb; pudb.set_trace()
mat = mgr.as_matrix()
            # we may be using an ndarray to test slicing and it
            # might not be the full length of the axis
if isinstance(slobj, np.ndarray):
ax = mgr.axes[axis]
if len(ax) and len(slobj) and len(slobj) != len(ax):
slobj = np.concatenate([slobj, np.zeros(
len(ax) - len(slobj), dtype=bool)])
sliced = mgr.get_slice(slobj, axis=axis)
mat_slobj = (slice(None), ) * axis + (slobj, )
tm.assert_numpy_array_equal(mat[mat_slobj], sliced.as_matrix(),
check_dtype=False)
tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# slice
yield assert_slice_ok, mgr, ax, slice(None)
yield assert_slice_ok, mgr, ax, slice(3)
yield assert_slice_ok, mgr, ax, slice(100)
yield assert_slice_ok, mgr, ax, slice(1, 4)
yield assert_slice_ok, mgr, ax, slice(3, 0, -2)
# boolean mask
yield assert_slice_ok, mgr, ax, np.array([], dtype=np.bool_)
yield (assert_slice_ok, mgr, ax,
np.ones(mgr.shape[ax], dtype=np.bool_))
yield (assert_slice_ok, mgr, ax,
np.zeros(mgr.shape[ax], dtype=np.bool_))
if mgr.shape[ax] >= 3:
yield (assert_slice_ok, mgr, ax,
np.arange(mgr.shape[ax]) % 3 == 0)
yield (assert_slice_ok, mgr, ax, np.array(
[True, True, False], dtype=np.bool_))
# fancy indexer
yield assert_slice_ok, mgr, ax, []
yield assert_slice_ok, mgr, ax, lrange(mgr.shape[ax])
if mgr.shape[ax] >= 3:
yield assert_slice_ok, mgr, ax, [0, 1, 2]
yield assert_slice_ok, mgr, ax, [-1, -2, -3]
def test_take(self):
def assert_take_ok(mgr, axis, indexer):
mat = mgr.as_matrix()
taken = mgr.take(indexer, axis)
tm.assert_numpy_array_equal(np.take(mat, indexer, axis),
taken.as_matrix(), check_dtype=False)
tm.assert_index_equal(mgr.axes[axis].take(indexer),
taken.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# take/fancy indexer
yield assert_take_ok, mgr, ax, []
yield assert_take_ok, mgr, ax, [0, 0, 0]
yield assert_take_ok, mgr, ax, lrange(mgr.shape[ax])
if mgr.shape[ax] >= 3:
yield assert_take_ok, mgr, ax, [0, 1, 2]
yield assert_take_ok, mgr, ax, [-1, -2, -3]
def test_reindex_axis(self):
def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
mat = mgr.as_matrix()
indexer = mgr.axes[axis].get_indexer_for(new_labels)
reindexed = mgr.reindex_axis(new_labels, axis,
fill_value=fill_value)
tm.assert_numpy_array_equal(algos.take_nd(mat, indexer, axis,
fill_value=fill_value),
reindexed.as_matrix(),
check_dtype=False)
tm.assert_index_equal(reindexed.axes[axis], new_labels)
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.):
yield (assert_reindex_axis_is_ok, mgr, ax,
pd.Index([]), fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax, mgr.axes[ax],
fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax][[0, 0, 0]], fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
pd.Index(['foo', 'bar', 'baz']), fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
pd.Index(['foo', mgr.axes[ax][0], 'baz']),
fill_value)
if mgr.shape[ax] >= 3:
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax][:-3], fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax][-3::-1], fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value)
def test_reindex_indexer(self):
def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,
fill_value):
mat = mgr.as_matrix()
reindexed_mat = algos.take_nd(mat, indexer, axis,
fill_value=fill_value)
reindexed = mgr.reindex_indexer(new_labels, indexer, axis,
fill_value=fill_value)
tm.assert_numpy_array_equal(reindexed_mat,
reindexed.as_matrix(),
check_dtype=False)
tm.assert_index_equal(reindexed.axes[axis], new_labels)
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.):
yield (assert_reindex_indexer_is_ok, mgr, ax,
pd.Index([]), [], fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
pd.Index(['foo'] * mgr.shape[ax]),
np.arange(mgr.shape[ax]), fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
mgr.axes[ax][::-1], np.arange(mgr.shape[ax]),
fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax, mgr.axes[ax],
np.arange(mgr.shape[ax])[::-1], fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
pd.Index(['foo', 'bar', 'baz']),
[0, 0, 0], fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
pd.Index(['foo', 'bar', 'baz']),
[-1, 0, -1], fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
pd.Index(['foo', mgr.axes[ax][0], 'baz']),
[-1, -1, -1], fill_value)
if mgr.shape[ax] >= 3:
yield (assert_reindex_indexer_is_ok, mgr, ax,
pd.Index(['foo', 'bar', 'baz']),
[0, 1, 2], fill_value)
# test_get_slice(slice_like, axis)
# take(indexer, axis)
# reindex_axis(new_labels, axis)
# reindex_indexer(new_labels, indexer, axis)
class TestBlockPlacement(tm.TestCase):
_multiprocess_can_split_ = True
def test_slice_len(self):
self.assertEqual(len(BlockPlacement(slice(0, 4))), 4)
self.assertEqual(len(BlockPlacement(slice(0, 4, 2))), 2)
self.assertEqual(len(BlockPlacement(slice(0, 3, 2))), 2)
self.assertEqual(len(BlockPlacement(slice(0, 1, 2))), 1)
self.assertEqual(len(BlockPlacement(slice(1, 0, -1))), 1)
def test_zero_step_raises(self):
self.assertRaises(ValueError, BlockPlacement, slice(1, 1, 0))
self.assertRaises(ValueError, BlockPlacement, slice(1, 2, 0))
def test_unbounded_slice_raises(self):
def assert_unbounded_slice_error(slc):
self.assertRaisesRegexp(ValueError, "unbounded slice",
lambda: BlockPlacement(slc))
assert_unbounded_slice_error(slice(None, None))
assert_unbounded_slice_error(slice(10, None))
assert_unbounded_slice_error(slice(None, None, -1))
assert_unbounded_slice_error(slice(None, 10, -1))
        # These are "unbounded" because a negative index will change depending
        # on the container shape.
assert_unbounded_slice_error(slice(-1, None))
assert_unbounded_slice_error(slice(None, -1))
assert_unbounded_slice_error(slice(-1, -1))
assert_unbounded_slice_error(slice(-1, None, -1))
assert_unbounded_slice_error(slice(None, -1, -1))
assert_unbounded_slice_error(slice(-1, -1, -1))
def test_not_slice_like_slices(self):
def assert_not_slice_like(slc):
self.assertTrue(not BlockPlacement(slc).is_slice_like)
assert_not_slice_like(slice(0, 0))
assert_not_slice_like(slice(100, 0))
assert_not_slice_like(slice(100, 100, -1))
assert_not_slice_like(slice(0, 100, -1))
self.assertTrue(not BlockPlacement(slice(0, 0)).is_slice_like)
self.assertTrue(not BlockPlacement(slice(100, 100)).is_slice_like)
def test_array_to_slice_conversion(self):
def assert_as_slice_equals(arr, slc):
self.assertEqual(BlockPlacement(arr).as_slice, slc)
assert_as_slice_equals([0], slice(0, 1, 1))
assert_as_slice_equals([100], slice(100, 101, 1))
assert_as_slice_equals([0, 1, 2], slice(0, 3, 1))
assert_as_slice_equals([0, 5, 10], slice(0, 15, 5))
assert_as_slice_equals([0, 100], slice(0, 200, 100))
assert_as_slice_equals([2, 1], slice(2, 0, -1))
assert_as_slice_equals([2, 1, 0], slice(2, None, -1))
assert_as_slice_equals([100, 0], slice(100, None, -100))
def test_not_slice_like_arrays(self):
def assert_not_slice_like(arr):
self.assertTrue(not BlockPlacement(arr).is_slice_like)
assert_not_slice_like([])
assert_not_slice_like([-1])
assert_not_slice_like([-1, -2, -3])
assert_not_slice_like([-10])
assert_not_slice_like([-1])
assert_not_slice_like([-1, 0, 1, 2])
assert_not_slice_like([-2, 0, 2, 4])
assert_not_slice_like([1, 0, -1])
assert_not_slice_like([1, 1, 1])
def test_slice_iter(self):
self.assertEqual(list(BlockPlacement(slice(0, 3))), [0, 1, 2])
self.assertEqual(list(BlockPlacement(slice(0, 0))), [])
self.assertEqual(list(BlockPlacement(slice(3, 0))), [])
self.assertEqual(list(BlockPlacement(slice(3, 0, -1))), [3, 2, 1])
self.assertEqual(list(BlockPlacement(slice(3, None, -1))),
[3, 2, 1, 0])
def test_slice_to_array_conversion(self):
def assert_as_array_equals(slc, asarray):
tm.assert_numpy_array_equal(
BlockPlacement(slc).as_array,
np.asarray(asarray, dtype=np.int64))
assert_as_array_equals(slice(0, 3), [0, 1, 2])
assert_as_array_equals(slice(0, 0), [])
assert_as_array_equals(slice(3, 0), [])
assert_as_array_equals(slice(3, 0, -1), [3, 2, 1])
assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])
assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])
def test_blockplacement_add(self):
bpl = BlockPlacement(slice(0, 5))
self.assertEqual(bpl.add(1).as_slice, slice(1, 6, 1))
self.assertEqual(bpl.add(np.arange(5)).as_slice, slice(0, 10, 2))
self.assertEqual(list(bpl.add(np.arange(5, 0, -1))), [5, 5, 5, 5, 5])
def test_blockplacement_add_int(self):
def assert_add_equals(val, inc, result):
self.assertEqual(list(BlockPlacement(val).add(inc)), result)
assert_add_equals(slice(0, 0), 0, [])
assert_add_equals(slice(1, 4), 0, [1, 2, 3])
assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1])
assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])
assert_add_equals([1, 2, 4], 0, [1, 2, 4])
assert_add_equals(slice(0, 0), 10, [])
assert_add_equals(slice(1, 4), 10, [11, 12, 13])
assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11])
assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])
assert_add_equals([1, 2, 4], 10, [11, 12, 14])
assert_add_equals(slice(0, 0), -1, [])
assert_add_equals(slice(1, 4), -1, [0, 1, 2])
assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])
assert_add_equals([1, 2, 4], -1, [0, 1, 3])
self.assertRaises(ValueError,
lambda: BlockPlacement(slice(1, 4)).add(-10))
self.assertRaises(ValueError,
lambda: BlockPlacement([1, 2, 4]).add(-10))
self.assertRaises(ValueError,
lambda: BlockPlacement(slice(2, None, -1)).add(-1))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
fivejjs/copperhead | samples/laplace.py | 5 | 1287 | from copperhead import *
from numpy import zeros
import numpy as np
@cu
def initialize(N):
nx, ny = N
def el(i):
y = i / nx
if y==0:
return 1.0
else:
return 0.0
return map(el, range(nx * ny))
@cu
def solve(u, N, D2, it):
nx, ny = N
dx2, dy2 = D2
def el(i):
x = i % nx
y = i / nx
if x == 0 or x == nx-1 or y == 0 or y == ny-1:
return u[i]
else:
return ((u[i-1]+u[i+1])*dy2 + \
(u[i-nx]+u[i+nx])*dx2)/(2*(dx2+dy2))
if it > 0:
u = map(el, indices(u))
return solve(u, N, D2, it-1)
else:
return u
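# A note on the update rule in solve(): the interior stencil above is the
# standard Jacobi relaxation for Laplace's equation on a regular grid,
#
#     u_new[x, y] = ((u[x-1, y] + u[x+1, y]) * dy2
#                    + (u[x, y-1] + u[x, y+1]) * dx2) / (2 * (dx2 + dy2)),
#
# applied for `it` iterations while the boundary rows and columns keep their
# initial (Dirichlet) values.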
dx = 0.1
dy = 0.1
dx2 = dx*dx
dy2 = dy*dy
N = (100,100)
D2 = (dx2, dy2)
p = runtime.places.default_place
with p:
u = initialize(N)
print("starting timer")
import time
start = time.time()
#Solve
u = solve(u, N, D2, 8000)
#Force result to be finalized at execution place
#Otherwise, timing loop may not account for all computation
u = force(u, p)
end = time.time()
print("Computation time: %s seconds" %(end - start))
result = np.reshape(to_numpy(u), N)
try:
import matplotlib.pyplot as plt
plt.imshow(result)
plt.show()
except:
pass
| apache-2.0 |
gcanasherrera/Weak-Lensing | WL_Script.py | 1 | 23947 | # Name: WL_Script.py
#
# Weak-Lensing "Study of Systematics and Classification of Compact Objects" Program I
#
# Type: python script
#
# Description: Central script that develops the whole process of reading images, filtering into galaxies and stars, correcting sizes and shapes, correcting PSF anisotropies, and re-classifying compact objects into galaxies to obtain a final catalogue
#
# Returns: FITS image - mass-density map
# Catalogs
# Plots
# FITS image - trial from Source Extractor
#
__author__ = "Guadalupe Canas Herrera"
__copyright__ = "Copyright (C) 2015 G. Canas Herrera"
__license__ = "Public Domain"
__version__ = "4.0.0"
__maintainer__ = "Guadalupe Canas"
__email__ = "[email protected]"
# Improvements: more automatic ---> only needs the name of the picture, the catalogue (in case you have it) and the BAND you want to analyze
# Old CatalogPlotter3.py has been split into two: WL_Script.py and WL_Utils.py
# Also call: WL_utils.py, WL_filter_mag_gal.py - WL_ellip_fitter.py (written by Guadalupe Canas Herrera)
# Also call 2: Source Extractor (by Emmanuel Bertin V2.3.2), sex2fiat (by DAVID WITTMAN v1.2), fiatfilter (by DAVID WITTMAN v1.2), ellipto (by DAVID WITTMAN v1.2), dlscombine (by DAVID WITTMAN v1.2 and modified by GUADALUPE CANAS)
#
# DLSCOMBINE CORRECTS PSF: it depends on fiat.c, fiat.h, dlscombine_utils.c, dlscombine.c, and dlscombine.h
# Guadalupe Canas Herrera modified fiat.c, dlscombine_utils.c, dlscombine.h
#
import matplotlib.pyplot as plt #Works for making python behave like matlab
#import sextutils as sex #Program used to read the original catalog
import numpy as np #Maths arrays and more
import numpy.ma as ma #Masking arrays
import sys #Strings inputs
import math #mathematical functions
import subprocess #calling to the terminal
from astropy.modeling import models, fitting #Package for fitting Legendre Polynomials
import warnings #Advices
from mpl_toolkits.mplot3d import Axes3D #Plotting in 3D
import WL_ellip_fitter as ellip_fit #Ellipticity fitting
from WL_Utils import sex_caller, sex_caller_corrected, ellipto_caller, dlscombine_pol_caller, dlscombine_leg_caller, ds9_caller, plotter, ellipticity, specfile, stars_maker, galaxies_maker, specfile_r, specfile_z
from WL_filter_mag_gal import filter_mag #Filtering final catalog of galaxies a function of magnitudes and call fiatmap
import seaborn as sns
import matplotlib.pylab as P #histograms
from Class_CrossMatching import CrossMatching
from Class_CatalogReader import CatalogReader
############################### BEGIN SCRIPT ###############################
# (1): We define the ending of the input/output files
type_fits = ".fits"
type_cat = ".cat"
type_fcat = ".fcat"
type_good = "_good.fcat"
type_galaxies = "_galaxies.fcat"
type_stars = "_stars.fcat"
type_ellipto_galaxies = "_ellipto_galaxies.fcat"
type_ellipto_stars = "_ellipto_stars.fcat"
type_shapes_galaxies = "_shapes_galaxies.fcat"
type_shapes_stars = "_shapes_stars.fcat"
type_match = "_match.fcat"
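# Naming convention: every intermediate catalogue is the image base name plus
# one of the suffixes above. For example, a hypothetical input "cluster_r.fits"
# (name used only for illustration) produces "cluster_r.fcat",
# "cluster_r_stars.fcat", "cluster_r_ellipto_galaxies.fcat", and so on.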
def main():
sns.set(style="white", palette="muted", color_codes=True)
    print("Welcome to the Weak-Lensing Script, here to help you analyze Subaru images in search of galaxy clusters")
print("")
array_file_name = []
    # (1): Ask for the number of images used in the cross-matching process.
    question = int(raw_input("Please, tell me how many images are used in the cross-matching: "))
cont = 0
BEFORE_NAME = ''
FILE_NAME = ''
#print FILE_NAME
FILE_NAME_CORRECTED= ''
while cont < question:
        # (2): We need to read the image and band. We ask on screen for the image of the region of the sky.
filter =raw_input("Introduce the name of the filter: ")
fits = raw_input("Please, introduce the name of the fits image you want to read or directly the catalogue: ")
#Save the name of the .fits and .cat in a string:
BEFORE_NAME = fits.find('.')
FILE_NAME = fits[:BEFORE_NAME]
#print FILE_NAME
FILE_NAME_CORRECTED='{}_corrected'.format(FILE_NAME)
if fits.endswith(type_fits):
#(3) STEP: Call Source Extractor
print("Let me call Source Extractor (called sex by friends). It will obtain the celestial objects. When it finishes I will show you the trial image")
print("")
catalog_name = sex_caller(fits, FILE_NAME)
#Show results of trial.fits
#subprocess.call('./ds9 {}_trial.fits'.format(FILE_NAME), shell=True)
#(4): Transform Source Extractor catalog into FIAT FORMAT
print("I'm transforming the catalog into a FIAT 1.0 format")
print("")
catalog_name_fiat= '{}.fcat'.format(FILE_NAME)
transform_into_fiat='perl sex2fiat.pl {}>{}'.format(catalog_name, catalog_name_fiat)
subprocess.call(transform_into_fiat, shell=True)
if fits.endswith(type_fcat):
catalog_name_fiat = fits
fits = raw_input("Please, introduce the name of the fits image: ")
#(5): Read the FIAT Catalog
FWHM_max_stars=0
names = ["number", "flux_iso", "fluxerr_iso", "mag_iso", "magger_iso", "mag_aper_1", "magerr_aper_1", "mag", "magger", "flux_max", "isoarea", "x", "y", "ra", "dec", "ixx", "iyy", "ixy", "ixxWIN", "iyyWIN", "ixyWIN", "A", "B", "theta", "enlogation", "ellipticity", "FWHM", "flags", "class_star"]
fcat = np.genfromtxt(catalog_name_fiat, names=names)
P.figure()
P.hist(fcat['class_star'], 50, normed=1, histtype='stepfilled')
P.show()
        #Let's fix the ellipticity + and - for all celestial objects
        #(6): plot FWHM vs mag_iso
        print("I'm plotting MAG_ISO vs. FWHM")
magnitude1='mag_iso'
magnitude2='FWHM'
plotter(fcat, magnitude1, magnitude2, 2, '$mag(iso)$', '$FWHM/pixels$')
plt.show()
print("Do you want to fix axis limits? Please answer with y or n")
answer=raw_input()
if answer== "y":
xmin=float(raw_input("X min: "))
xmax=float(raw_input("X max: "))
ymin=float(raw_input("Y min: "))
ymax=float(raw_input("Y max: "))
#Fix limits
            plotter(fcat, magnitude1, magnitude2, 3, '$mag(iso)$', '$FWHM/pixels$')
plt.xlim(xmin,xmax)
plt.ylim(ymin,ymax)
plt.show(block=False)
elif answer == "n":
plt.show(block=False)
else:
plt.show(block=False)
# (7): Obtaining a GOOD CATALOG without blank spaces and filter saturate objects
print("This catalog is not the good one. I'll show you why")
print("")
magnitude_x="x"
magnitude_y="y"
plotter(fcat, magnitude_x, magnitude_y, 4, '$x/pixels$', '$y/pixels$')
plt.show(block=False)
print("Please, introduce the values you prefer to bound x and y")
xmin_good=float(raw_input("X min: "))
xmax_good=float(raw_input("X max: "))
ymin_good=float(raw_input("Y min: "))
ymax_good=float(raw_input("Y max: "))
catalog_name_good= '{}{}'.format(FILE_NAME, type_good)
terminal_good= 'perl fiatfilter.pl "x>{} && x<{} && y>{} && y<{} && FLUX_ISO<3000000" {}>{}'.format(xmin_good, xmax_good, ymin_good, ymax_good, catalog_name_fiat, catalog_name_good)
subprocess.call(terminal_good, shell=True)
print("Wait a moment, I'm showing you the results in a sec")
fcat_good = np.genfromtxt(catalog_name_good, names=names)
print np.amax(fcat_good['flux_iso'])
plotter(fcat_good, 'x', 'y', 5, '$x/pixels$', '$y/pixels$')
plt.show(block=False)
ellipticity(fcat_good, 1)
plt.show(block=False)
plotter(fcat_good, magnitude1, magnitude2, 2, '$mag(iso)$', '$FWHM/pixels$')
plt.show(block=False)
#(8.1.): Creating STARS CATALOG
        print("Let's obtain a FIAT catalog that contains only stars. We need to set bounds: have a look at the FWHM vs Mag_ISO plot")
mag_iso_min_stars=float(raw_input("Enter the minimum value for mag_iso: "))
mag_iso_max_stars=float(raw_input("Enter the maximum value for mag_iso: "))
FWHM_min_stars=float(raw_input("Enter the minimum value for FWHM: "))
FWHM_max_stars=float(raw_input("Enter the maximum value for FWHM: "))
catalog_name_stars= '{}{}'.format(FILE_NAME, type_stars)
        # Build the command string to run in the terminal
terminal_stars= 'perl fiatfilter.pl "MAG_ISO>{} && MAG_ISO<{} && FWHM>{} && FWHM<{} && CLASS_STAR>0.9 && FLUX_ISO<3000000" {}>{}'.format(mag_iso_min_stars, mag_iso_max_stars, FWHM_min_stars, FWHM_max_stars, catalog_name_good, catalog_name_stars)
subprocess.call(terminal_stars, shell=True)
fcat_stars=np.genfromtxt(catalog_name_stars, names=names)
ellipticity(fcat_stars, 6)
plt.show(block=False)
#(8.2.): Checking STARS CATALOG with Source Extractor Neural Network Output
P.figure()
P.hist(fcat_stars['class_star'], 50, normed=1, histtype='stepfilled')
P.show(block=False)
#(9.1.): Creating GALAXIES CATALOG
        print("Let's obtain a FIAT catalog that contains only galaxies. We need to set bounds: have a look at the FWHM vs Mag_ISO plot")
print("")
print("First, I'm going to perform a linear fit. Tell me the values of mag_iso")
mag_iso_min_galaxies=float(raw_input("Enter the minimum value for mag_iso: "))
mag_iso_max_galaxies=float(raw_input("Enter the maximum value for mag_iso: "))
catalog_name_fit='{}_fit{}'.format(FILE_NAME, type_galaxies)
        # Build the command string to run in the terminal
terminal_fit= 'perl fiatfilter.pl -v "MAG_ISO>{} && MAG_ISO<{}" {}>{}'.format(mag_iso_min_galaxies, mag_iso_max_galaxies, catalog_name_good, catalog_name_fit)
subprocess.call(terminal_fit, shell=True)
fcat_fit = np.genfromtxt(catalog_name_fit, names=names)
fit=np.polyfit(fcat_fit['mag_iso'], fcat_fit['FWHM'], 1)
#Save in variables the values of the fitting
m=fit[0]
n=fit[1]
        print 'The value of the y-intercept n={} and the value of the slope m={}'.format(n,m)
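        # The linear fit above gives the trend FWHM ~ m*MAG_ISO + n over the
        # selected magnitude range. In the fiatfilter call below an object is
        # kept as a galaxy when its FWHM lies above both this line and the
        # maximum stellar FWHM, i.e. FWHM > m*MAG_ISO + n and FWHM > FWHM_max_stars.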
# Once you have the values of the fitting we can obtain the catalog of galaxies
catalog_name_galaxies= '{}{}'.format(FILE_NAME, type_galaxies)
#terminal_galaxies= 'perl fiatfilter.pl -v "FWHM>{}*MAG_ISO+{} && FWHM>{} && CLASS_STAR<0.1 && FLUX_ISO<3000000" {}>{}'.format(m, n, FWHM_max_stars, catalog_name_good, catalog_name_galaxies)
terminal_galaxies= 'perl fiatfilter.pl -v "FWHM>{}*MAG_ISO+{} && FWHM>{} && FLUX_ISO<3000000" {}>{}'.format(m, n, FWHM_max_stars, catalog_name_good, catalog_name_galaxies)
subprocess.call(terminal_galaxies, shell=True)
fcat_galaxies=np.genfromtxt(catalog_name_galaxies, names=names)
#subprocess.call('./fiatreview {} {}'.format(fits, catalog_name_galaxies), shell=True)
magnitude1='mag_iso'
magnitude2='FWHM'
plotter(fcat_good, magnitude1, magnitude2, 2, '$mag(iso)$', '$FWHM/pixels$')
mag_th= np.linspace(1, 30, 1000)
p = np.poly1d(fit)
plt.plot(mag_th, p(mag_th), 'b-')
plt.show()
ellipticity(fcat_galaxies, 9)
plt.show()
#(9.2.): Checking GALAXIES CATALOG with Source Extractor Neural Network Output
P.figure()
P.hist(fcat_galaxies['class_star'], 50, normed=1, histtype='stepfilled')
P.show(block=False)
# (***) CHECKING FOR STARS // GALAXIES DIVISION
weights_stars=np.ones_like(fcat_stars['class_star'])/len(fcat_stars['class_star'])
weights_galaxies=np.ones_like(fcat_galaxies['class_star'])/len(fcat_galaxies['class_star'])
weights_all = np.ones_like(fcat_good['class_star'])/len(fcat_good['class_star'])
plt.figure()
plt.hist(fcat_stars['class_star'], weights = weights_stars, bins= 5, histtype='stepfilled', label ='stars')
plt.hist(fcat_galaxies['class_star'], weights = weights_galaxies, bins= 5, histtype='stepfilled', label ='galaxies')
plt.legend(loc='upper right')
plt.xlabel('$class_{star}$', labelpad=20, fontsize=20)
plt.ylabel('$Frequency$', fontsize=20)
plt.ylim(0,0.6)
plt.show()
plt.hist(fcat_good['class_star'], color= 'r', weights = weights_all, bins=50, histtype='stepfilled', label ='all')
plt.legend(loc='upper right')
plt.xlabel('$class_{star}$', labelpad=20, fontsize=20)
plt.ylabel('$Frequency$', fontsize=20)
plt.ylim(0,0.6)
plt.show()
plt.show()
#(10): Calling Ellipto to recalculate shapes and ellipticities: ELLIPTO CATALOG
print("")
        print("Now it is necessary to call ellipto in order to properly obtain sizes and shapes for both galaxies and stars")
print("")
print("STARS")
print("")
catalog_name_ellipto_stars='{}{}'.format(FILE_NAME, type_ellipto_stars)
ellipto_caller(catalog_name_stars, fits, catalog_name_ellipto_stars)
print("GALAXIES")
catalog_name_ellipto_galaxies='{}{}'.format(FILE_NAME, type_ellipto_galaxies)
ellipto_caller(catalog_name_galaxies, fits, catalog_name_ellipto_galaxies)
print("DONE")
print("")
        #(11): Now we classify the catalogs obtained with ellipto by filtering with fiatfilter: SHAPES CATALOG
        print("Filtering the good celestial objects obtained from ellipto using fiatfilter...")
print("")
print("STARS")
catalog_name_shapes_stars='{}{}'.format(FILE_NAME, type_shapes_stars)
fiatfilter_errcode_stars='perl fiatfilter.pl -v "errcode<2" {}>{}'.format(catalog_name_ellipto_stars, catalog_name_shapes_stars)
subprocess.call(fiatfilter_errcode_stars, shell=True)
print("")
print("GALAXIES")
print("")
catalog_name_shapes_galaxies='{}{}'.format(FILE_NAME, type_shapes_galaxies)
fiatfilter_errcode_galaxies='perl fiatfilter.pl -v "errcode<2" {}>{}'.format(catalog_name_ellipto_galaxies, catalog_name_shapes_galaxies)
subprocess.call(fiatfilter_errcode_galaxies, shell=True)
print("DONE")
print("")
#(12): Recalculating ellipticities for stars
print("I'm recalculating ellipticities of the new star set after being corrected by ellipto")
names_ellipto = ["x", "y", "mag_iso", "median", "ixx", "iyy", "ixy", "a_input", "b_input", "theta", "ellipticity", "errcode", "sigsky", "size", "flux", "mean_rho_4th", "sigma_e", "wander"]
fiat_shapes_stars= np.genfromtxt(catalog_name_shapes_stars, names=names_ellipto)
ellipticity(fiat_shapes_stars, 15)
plt.show()
        print "Show ellipticity as a function of x and y"
plotter(fiat_shapes_stars, 'x', 'ellipticity', 2, '$x/pixels$', '$\epsilon$')
plt.show()
plotter(fiat_shapes_stars, 'y', 'ellipticity', 2, '$y/pixels$', '$\epsilon$')
plt.show()
fiat_shapes_galaxies= np.genfromtxt(catalog_name_shapes_galaxies, names=names_ellipto)
ellipticity(fiat_shapes_galaxies, 15)
plt.show(block=False)
#(13): STARS--> you obtain two fitting both for ellip_1 and ellip_2
print("")
print("I'm performing a fitting of those ellipticities e_1 and e_2: both a simple 2D polynomial fitting and a 2D Legendre Polynomial fitting")
print("")
dlscombine_file_pol=''
dlscombine_file_leg=''
#Let's call the function fit_Polynomial from ellip_fitting3.py
fitting_file_ellip_pol=ellip_fit.fit_Polynomial(FILE_NAME, fiat_shapes_stars)
#Create file read by dlscombine
dlscombine_file_pol=specfile(fits, fitting_file_ellip_pol, FILE_NAME)
print("")
#Let's call the function fit_Legendre from ellip_fitting3.py
fitting_file_ellip_leg=ellip_fit.fit_Legendre(FILE_NAME, fiat_shapes_stars)
#Create file read by dlscombine
if filter=='r':
dlscombine_file_leg=specfile_r(fits, fitting_file_ellip_leg, FILE_NAME)
if filter=='z':
dlscombine_file_leg=specfile_z(fits, fitting_file_ellip_leg, FILE_NAME)
#(14): Let's call DLSCOMBINE to correct PSF anisotropies
print("I'm correcting PSF anisotropies using dlscombine: BOTH FOR POL AND LEG FITTING")
print("")
        fits_pol='{}_corrected_pol.fits'.format(FILE_NAME)
dlscombine_call_pol='./dlscombine_pol {} {}'.format(dlscombine_file_pol, fits_pol)
subprocess.call(dlscombine_call_pol, shell=True)
        fits_leg='{}_corrected_leg.fits'.format(FILE_NAME)
dlscombine_call_leg='./dlscombine_leg {} {}'.format(dlscombine_file_leg, fits_leg)
subprocess.call(dlscombine_call_leg, shell=True)
#(15): Call again Source Extractor only for the Legendre Polynomial fitting
print("I'm calling again SExtractor to obtain a new catalog from the corrected picture (only from the leg fitting)")
print("")
catalog_name_corrected=sex_caller_corrected(fits_leg, FILE_NAME)
#(16): Transform .cat into .fcat (FIAT) for the corrected catalog
catalog_name_fiat_corrected='{}_corrected.fcat'.format(FILE_NAME)
transform_into_fiat_corrected='perl sex2fiat.pl {}>{}'.format(catalog_name_corrected, catalog_name_fiat_corrected)
subprocess.call(transform_into_fiat_corrected, shell=True)
print("")
array_file_name.append(catalog_name_fiat_corrected)
cont = cont + 1
NAME_1= array_file_name[0]
NAME_2= array_file_name[1]
    BEFORE_NAME_1 = NAME_1.find('.')
    FILE_NAME_1 = NAME_1[:BEFORE_NAME_1]
    BEFORE_NAME_2 = NAME_2.find('.')
    FILE_NAME_2 = NAME_2[:BEFORE_NAME_2]
#CROSS-MATCHING
catag_r = CatalogReader(array_file_name[0])
catag_r.read()
catag_z = CatalogReader(array_file_name[1])
catag_z.read()
crossmatching = CrossMatching(catag_r.fcat, catag_z.fcat)
crossmatching.kdtree(n=1*1e-06)
crossmatching.catalog_writter('2CM_{}'.format(FILE_NAME_1), compare = '1to2')
print '\n'
crossmatching.catalog_writter('2CM_{}'.format(FILE_NAME_2), compare = '2to1')
FILE_NAME_FINAL = raw_input("Please, tell me the FINAL name: ")
if crossmatching.cont1to2<crossmatching.cont2to1:
catag_final_1 = CatalogReader('2CM_{}{}'.format(FILE_NAME_1, type_fcat))
catag_final_1.read()
catag_final_2 = CatalogReader('2CM_{}{}'.format(FILE_NAME_2, type_fcat))
catag_final_2.read()
crossmatching_final = CrossMatching(catag_final_1.fcat, catag_final_2.fcat)
crossmatching_final.kdtree(n=1*1e-06)
        crossmatching_final.catalog_writter('{}'.format(FILE_NAME_FINAL), compare = '1to2')
if crossmatching.cont1to2>crossmatching.cont2to1:
catag_final_1 = CatalogReader('2CM_{}{}'.format(FILE_NAME_1, type_fcat))
catag_final_1.read()
catag_final_2 = CatalogReader('2CM_{}{}'.format(FILE_NAME_2, type_fcat))
catag_final_2.read()
crossmatching_final = CrossMatching(catag_final_1.fcat, catag_final_2.fcat)
crossmatching_final.kdtree(n=1*1e-06)
        crossmatching_final.catalog_writter('{}'.format(FILE_NAME_FINAL), compare = '2to1')
if crossmatching.cont1to2==crossmatching.cont2to1:
catag_final_1 = CatalogReader('2CM_{}{}'.format(FILE_NAME_1, type_fcat))
catag_final_1.read()
catag_final_2 = CatalogReader('2CM_{}{}'.format(FILE_NAME_2, type_fcat))
catag_final_2.read()
crossmatching_final = CrossMatching(catag_final_1.fcat, catag_final_2.fcat)
crossmatching_final.kdtree(n=1*1e-06)
        crossmatching_final.catalog_writter('{}'.format(FILE_NAME_FINAL), compare = '1to2')
    catalog_name_fiat_corrected_final = '{}{}'.format(FILE_NAME_FINAL, type_fcat)
    #(17): Transform again the corrected catalog into a GOOD catalog
catalog_name_corrected_good= '{}{}'.format(FILE_NAME_FINAL, type_good)
terminal_corrected_good= 'perl fiatfilter.pl "x>{} && x<{} && y>{} && y<{}" {}>{}'.format(xmin_good, xmax_good, ymin_good, ymax_good, catalog_name_fiat_corrected_final, catalog_name_corrected_good)
subprocess.call(terminal_corrected_good, shell=True)
FILE_NAME_CORRECTED='{}_corrected'.format(FILE_NAME_FINAL)
#(18): STARS CATALOG again...
print("Now we need to repeat the classification to obtain only galaxies and stars as we did before")
print("")
print("Let me show you again the FWHM vs MAG plot \n")
print("")
fcat_corrected=np.genfromtxt(catalog_name_corrected_good, names=names)
plotter(fcat_corrected, 'mag_iso', 'FWHM', 3, '$mag(iso)$', '$FWHM$')
plt.show(block=False)
print("First stars...")
print("")
catalog_name_fiat_corrected_stars=''
catalog_name_fiat_corrected_stars, FWHM_max_stars=stars_maker(catalog_name_corrected_good, FILE_NAME_CORRECTED)
fcat_stars_corrected=np.genfromtxt(catalog_name_fiat_corrected_stars, names=names)
ellipticity(fcat_stars_corrected, 20)
plt.show(block=False)
#(19): GALAXIES CATALOG again...
print("")
print("Second galaxies...")
print("")
catalog_name_fiat_corrected_galaxies=galaxies_maker(catalog_name_corrected_good, FILE_NAME_CORRECTED, FWHM_max_stars)
fcat_galaxies_corrected=np.genfromtxt(catalog_name_fiat_corrected_galaxies, names=names)
# (***) CHECKING FOR STARS // GALAXIES DIVISION
weights_stars=np.ones_like(fcat_stars_corrected['class_star'])/len(fcat_stars_corrected['class_star'])
weights_galaxies=np.ones_like(fcat_galaxies_corrected['class_star'])/len(fcat_galaxies_corrected['class_star'])
weights_all = np.ones_like(fcat_corrected['class_star'])/len(fcat_corrected['class_star'])
plt.figure()
plt.hist(fcat_stars_corrected['class_star'], weights = weights_stars, bins= 10, histtype='stepfilled', label ='stars')
plt.hist(fcat_galaxies_corrected['class_star'], weights = weights_galaxies, bins= 15, histtype='stepfilled', label ='galaxies')
plt.legend(loc='upper right')
plt.xlabel('$class_{star}$', labelpad=20, fontsize=20)
plt.ylabel('$Frequency$', fontsize=20)
plt.show()
plt.hist(fcat_corrected['class_star'], color= 'r', weights = weights_all, bins=50, histtype='stepfilled', label ='all')
plt.legend(loc='upper right')
plt.xlabel('$class_{star}$', labelpad=20, fontsize=20)
plt.ylabel('$Frequency$', fontsize=20)
plt.show()
#(20): ELLIPTO CATALOG and SHAPES CATALOG (only galaxies) again...
catalog_name_ellipto_stars_corrected='{}{}'.format(FILE_NAME_CORRECTED, type_ellipto_stars)
ellipto_caller(catalog_name_fiat_corrected_stars, fits, catalog_name_ellipto_stars_corrected)
catalog_name_ellipto_galaxies_corrected='{}{}'.format(FILE_NAME_CORRECTED, type_ellipto_galaxies)
ellipto_caller(catalog_name_fiat_corrected_galaxies, fits, catalog_name_ellipto_galaxies_corrected)
catalog_name_shapes_galaxies_corrected='{}{}'.format(FILE_NAME_CORRECTED, type_shapes_galaxies)
fiatfilter_errcode_galaxies_corrected='perl fiatfilter.pl -v "errcode<2" {}>{}'.format(catalog_name_ellipto_galaxies_corrected, catalog_name_shapes_galaxies_corrected)
subprocess.call(fiatfilter_errcode_galaxies_corrected, shell=True)
catalog_name_shapes_stars_corrected='{}{}'.format(FILE_NAME_CORRECTED, type_shapes_stars)
fiatfilter_errcode_stars_corrected='perl fiatfilter.pl -v "errcode<2" {}>{}'.format(catalog_name_ellipto_stars_corrected, catalog_name_shapes_stars_corrected)
subprocess.call(fiatfilter_errcode_stars_corrected, shell=True)
if __name__ == "__main__":
main()
| gpl-3.0 |
NP-Omix/BioCompass | BioCompass/table_1_extender.py | 2 | 4650 | from sys import argv
from Bio import SeqIO
import pandas as pd
import re
import itertools
import os.path
script, strain_name, cluster_number = argv
table1_df = pd.read_csv('../outputs/tables/%s_%s_table1.csv'%(strain_name,cluster_number), sep='\t')
def find_boarders(file_name):
    with open(file_name) as f:
for num, line in enumerate(f):
header = re.search(r'^(\d*). (\S*)_(\S*)$',line)
if header != None and num not in numbers:
numbers.append(num)
headers.append(header.group())
temp_dict = dict(zip(numbers, headers))
return temp_dict
def itinerate_temp_file(file_name,temp_dict):
""" Extract from file_name a section defined by i into temp.txt"""
with open(file_name) as fp:
temp_file = open("temp.txt", "w")
for num, line in enumerate(fp):
# Is not the last one
if i[0] != sorted(temp_dict)[-1]:
if num >= i[0] and num < j[0]:
temp_file.writelines(line)
else:
if num >= i[0]:
temp_file.writelines(line)
temp_file.close()
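# How the helpers above work together: find_boarders() records the line number
# of every cluster header such as "1. CP003219_c13", giving a dict like
# {139: '1. CP003219_c13', ...} (values illustrative). itinerate_temp_file()
# then copies the lines between the current header i and the next header j
# into temp.txt, so that find_best_hits() below only scans the hits belonging
# to one candidate cluster at a time.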
def find_best_hits(table1_df):
""" For each ctg1_* @ table1_df['locus_tag'], get
"""
for item in table1_df['locus_tag']:
        with open("temp.txt", "r") as tf:
for line in tf:
# query gene, subject gene, %identity, blast score, %coverage, e-value
# e.g line <- 'ctg1_160\tSCATT_35940\t69\t169\t100.0\t2.3e-40\t\n'
best_hit = re.search(r'^%s\t(\S*)\t(\d*)\t(\d*)\t(\d*)'%item,line)
if best_hit and item not in col7:
# e.g. ctg1_160
col7.append(item)
# e.g i[1] <- '1. CP003219_c13'
hit = re.search(r'^(\d*). (\S*)_(\S*)',i[1])
# e.g. 'CP003219'
col8.append(hit.group(2))
# e.g. 'SCATT_35940'
col9.append(best_hit.group(1))
# e.g. '69'
col10.append(best_hit.group(2))
# e.g. '100'
col11.append(best_hit.group(4))
#find boarders for temp_file
short_cluster_number = re.search(r'0*([0-9]*)',cluster_number).group(1)
file_name = '../antiSMASH_input/%s/clusterblast/cluster%s.txt' % (strain_name,short_cluster_number)
numbers = []
headers = []
temp_dict = []
if os.path.isfile(file_name):
temp_dict = find_boarders(file_name)
col7 = []
col8 = []
col9 = []
col10 = []
col11 = []
it = iter(sorted(temp_dict.iteritems()))
i = it.next()
# e.g. i <- (139, '1. CP003219_c13')
if len(temp_dict) > 1:
j = it.next()
while True:
if i[0] == sorted(temp_dict)[-1]:
itinerate_temp_file(file_name,temp_dict)
find_best_hits(table1_df)
break
else:
itinerate_temp_file(file_name,temp_dict)
find_best_hits(table1_df)
i = j
if j[0] != sorted(temp_dict)[-1]:
j = it.next()
"""
ctg1_160 SCATT_35940 69 169 100.0 2.3e-40
hence:
best_hit_loc ->
SCATT_35940 AEW95965 3878018 3878386 + 50S_ribosomal_protein_L14
"""
col12 = []
for item in col9:
        with open(file_name, "r") as tf:
seen = []
for line in tf:
best_hit_loc = re.search(r'^%s\t(\S*)\t(.*)'%item,line)
if best_hit_loc and item not in seen:
col12.append(best_hit_loc.group(1))
seen.append(item)
frames = {'locus_tag':col7,'best_hit_BGC':col8,'best_hit_gene':col9,'best_hit_%id':col10,'best_hit_%cov':col11,'best_hit_gene_loc':col12}
new_cols_df = pd.DataFrame(frames, index=None)
table1_df = pd.merge(table1_df, new_cols_df, on='locus_tag', how='outer')
table1_df.fillna('None', inplace=True)
else:
col7 = []
col8 = []
for item in table1_df['locus_tag']:
col7.append(item)
col8.append('None')
frames = {'locus_tag':col7,'best_hit_BGC':col8}
new_cols_df = pd.DataFrame(frames, index=None)
table1_df = pd.merge(table1_df, new_cols_df, on='locus_tag', how='outer')
table1_handle = pd.HDFStore('../outputs/tables/table1.h5')
table1_handle['%s_%s' % (strain_name,cluster_number)] = table1_df
table1_handle.close()
table1_handle = open('../outputs/tables/%s_%s_table1.csv'%(strain_name,cluster_number), "w")
table1_df.to_csv(table1_handle, sep='\t', index=False)
table1_handle.close()
| bsd-3-clause |
quasiben/bokeh | bokeh/charts/builders/step_builder.py | 9 | 4531 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Step class which lets you build your Step charts by just
passing the arguments to the Chart class and calling the proper functions.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from ..builder import create_and_build
from .line_builder import LineBuilder
from ..glyphs import StepGlyph
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
def Step(data=None, x=None, y=None, **kws):
""" Create a step chart using :class:`StepBuilder
<bokeh.charts.builder.step_builder.StepBuilder>` to render the geometry
from the inputs.
.. note::
Only the x or y axis can display multiple variables, while the other is used
as an index.
Args:
data (list(list), numpy.ndarray, pandas.DataFrame, list(pd.Series)): a 2d data
source with columns of data for each stepped line.
x (str or list(str), optional): specifies variable(s) to use for x axis
y (str or list(str), optional): specifies variable(s) to use for y axis
In addition to the parameters specific to this chart,
:ref:`userguide_charts_defaults` are also accepted as keyword parameters.
.. note::
This chart type differs on input types as compared to other charts,
due to the way that series-type charts typically are plotting labeled series.
For example, a column for AAPL stock prices over time. Another way this could be
plotted is to have a DataFrame with a column of `stock_label` and columns of
`price`, which is the stacked format. Both should be supported, but the former
is the expected one. Internally, the latter format is being derived.
Returns:
:class:`Chart`: includes glyph renderers that generate the stepped lines
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.charts import Step, show, output_file
# build a dataset where multiple columns measure the same thing
data = dict(
stamp=[.33, .33, .34, .37, .37, .37, .37, .39, .41, .42,
.44, .44, .44, .45, .46, .49, .49],
postcard=[.20, .20, .21, .23, .23, .23, .23, .24, .26, .27,
.28, .28, .29, .32, .33, .34, .35]
)
# create a step chart where each column of measures receives a unique color and dash style
step = Step(data, y=['stamp', 'postcard'],
dash=['stamp', 'postcard'],
color=['stamp', 'postcard'],
title="U.S. Postage Rates (1999-2015)",
ylabel='Rate per ounce', legend=True)
output_file("steps.html")
show(step)
"""
kws['x'] = x
kws['y'] = y
return create_and_build(StepBuilder, data, **kws)
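# Note: the docstring example above uses the "wide" input format (one column
# per measured series). A single labelled pair of columns can also be passed
# through the same keyword arguments, e.g. (hypothetical column names, shown
# only as an illustrative sketch):
#
#     import pandas as pd
#     df = pd.DataFrame({'year': [1999, 2000, 2001], 'rate': [.33, .33, .34]})
#     step = Step(df, x='year', y='rate')
#
# since Step() simply forwards `x`, `y` and the remaining keyword arguments to
# create_and_build(StepBuilder, data, **kws).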
class StepBuilder(LineBuilder):
"""This is the Step builder and it is in charge of plotting
Step charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges, and finally add the
needed stepped lines taking the references from the source.
"""
def yield_renderers(self):
for group in self._data.groupby(**self.attributes):
glyph = StepGlyph(x=group.get_values(self.x.selection),
y=group.get_values(self.y.selection),
line_color=group['color'],
dash=group['dash'])
# save reference to composite glyph
self.add_glyph(group, glyph)
# yield each renderer produced by composite glyph
for renderer in glyph.renderers:
yield renderer
| bsd-3-clause |
huzq/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 17 | 7971 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the :class:`~sklearn.svm.OneClassSVM`
as our modeling tool. The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <https://matplotlib.org/basemap/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
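# Usage sketch for the helper above (shapes depend on the downloaded data):
#
#     data = fetch_species_distributions()
#     xgrid, ygrid = construct_grids(data)    # 1-D arrays of grid-cell coords
#     X, Y = np.meshgrid(xgrid, ygrid[::-1])  # full map grid, as used below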
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9998], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
        # Compute AUC with regard to the background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean) / std)
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
IndraVikas/scikit-learn | sklearn/mixture/gmm.py | 128 | 31069 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
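# Shape convention of the helper above, as an illustrative sketch:
#
#     X = np.random.randn(5, 2)      # 5 samples, 2 features
#     means = np.zeros((3, 2))       # 3 components
#     covars = np.ones((3, 2))       # one diagonal per component ('diag')
#     lpr = log_multivariate_normal_density(X, means, covars, 'diag')
#     # lpr.shape == (5, 3), i.e. (n_samples, n_components)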
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
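# Example use of sample_gaussian (illustrative values):
#
#     mean = np.array([0., 10.])
#     covar = np.array([1., 2.])     # diagonal covariance entries
#     X = sample_gaussian(mean, covar, 'diag', n_samples=4, random_state=0)
#     # X.shape == (2, 4), i.e. (n_features, n_samples)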
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
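        # The two lines above implement the standard mixture posterior:
        #   log p(x)  = logsumexp_k [ log w_k + log N(x | mu_k, Sigma_k) ]
        #   resp_k(x) = exp( log w_k + log N(x | mu_k, Sigma_k) - log p(x) )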
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
        C : array, shape = (n_samples,)
            Component memberships.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
        Warning: due to the final maximization step in the EM algorithm,
        the labels returned here may differ from those of a subsequent
        call to ``predict`` when only a few iterations are performed.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
        C : array, shape = (n_samples,)
            Component memberships.
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
        return -2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
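# --- Illustrative sketch (not part of the original module) -------------------
# The vectorized expression in _log_multivariate_normal_density_diag above is
# just the expansion of the per-sample diagonal Gaussian log-density
#     log N(x | mu, diag(s)) = -0.5 * (D*log(2*pi) + sum(log s)
#                                      + sum((x - mu)**2 / s)),
# with the quadratic term expanded as mu**2/s - 2*x*mu/s + x**2/s so that it
# can be written with matrix products. The naive reference below computes the
# same quantity with an explicit loop; it is a hypothetical helper meant only
# to clarify the algebra, and assumes numpy is imported as ``np`` at the top
# of this module.
def _log_density_diag_reference(X, means, covars):
    """Naive per-sample / per-component diagonal Gaussian log-density."""
    X = np.asarray(X, dtype=float)
    means = np.asarray(means, dtype=float)
    covars = np.asarray(covars, dtype=float)
    n_samples, n_dim = X.shape
    n_components = means.shape[0]
    lpr = np.empty((n_samples, n_components))
    for i in range(n_samples):
        for c in range(n_components):
            diff = X[i] - means[c]
            lpr[i, c] = -0.5 * (n_dim * np.log(2 * np.pi)
                                + np.sum(np.log(covars[c]))
                                + np.sum(diff ** 2 / covars[c]))
    return lpr
# np.allclose(_log_density_diag_reference(X, means, covars),
#             _log_multivariate_normal_density_diag(X, means, covars))
# should hold for any valid inputs.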
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
            # few observations, we need to reinitialize this component
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
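# --- Illustrative sketch (not part of the original module) -------------------
# For full covariance matrices the density above is evaluated through the
# Cholesky factor L (cv = L L^T): the log-determinant is 2*sum(log(diag(L)))
# and solve_triangular gives z = L^{-1} (x - mu), whose squared norm is the
# Mahalanobis distance (x - mu)^T cv^{-1} (x - mu). The direct (slower, less
# numerically careful) reference below makes that correspondence explicit; it
# is a hypothetical helper for illustration only and assumes numpy is imported
# as ``np`` at the top of this module.
def _log_density_full_reference(X, means, covars):
    """Direct evaluation of the full-covariance Gaussian log-density."""
    X = np.asarray(X, dtype=float)
    n_samples, n_dim = X.shape
    log_prob = np.empty((n_samples, len(means)))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        _, logdet = np.linalg.slogdet(cv)
        diff = X - mu
        # per-sample Mahalanobis distance diff_i^T cv^{-1} diff_i
        maha = np.einsum('ij,ij->i', diff, np.linalg.solve(cv, diff.T).T)
        log_prob[:, c] = -0.5 * (n_dim * np.log(2 * np.pi) + logdet + maha)
    return log_prob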
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
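# Shapes produced by the helper above (illustrative summary): for a template
# ``tied_cv`` of shape (n_dim, n_dim) and K components, the returned array
# matches the storage layout expected in ``GMM.covars_``:
#     'spherical' -> (K, n_dim)          each row filled with tied_cv.mean()
#     'tied'      -> (n_dim, n_dim)      the template itself, shared by all
#     'diag'      -> (K, n_dim)          each row a copy of diag(tied_cv)
#     'full'      -> (K, n_dim, n_dim)   one copy of the template per component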
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
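# The expression returned above is the responsibility-weighted version of the
# identity E[(x - mu)**2] = E[x**2] - 2*mu*E[x] + mu**2, evaluated per
# component and per feature: avg_X2 is the weighted second moment,
# avg_X_means is mu times the weighted first moment, avg_means2 is mu**2, and
# min_covar keeps the resulting variances bounded away from zero.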
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
            # Underflow errors in computing post * diff.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
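# The update above is the tied-covariance analogue: with s_c = sum_i r_ic x_i
# (``weighted_X_sum``) and the freshly updated means mu_c = s_c / N_c, the
# responsibility-weighted scatter sum_c sum_i r_ic (x_i - mu_c)(x_i - mu_c)^T
# simplifies to X^T X - sum_c mu_c s_c^T, which is exactly
# ``avg_X2 - avg_means2``; dividing by n_samples and adding min_covar to the
# diagonal (the strided ``out.flat`` assignment) yields the shared covariance.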
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
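# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal end-to-end run of the GMM class defined in this module, assuming
# numpy is imported as ``np`` at the top of the file and the module's sklearn
# dependencies are available. Guarded so it only runs when the file is
# executed directly, never on import.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    # two well-separated 1-D modes, as in the class docstring example
    data = np.concatenate([rng.normal(0, 1, (100, 1)),
                           rng.normal(10, 1, (100, 1))])
    model = GMM(n_components=2, covariance_type='diag', n_iter=100,
                random_state=0)
    model.fit(data)
    print('converged:', model.converged_)
    print('weights :', np.round(model.weights_, 2))
    print('means   :', np.round(model.means_.ravel(), 2))
    print('BIC     :', round(model.bic(data), 2))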
| bsd-3-clause |
yanlend/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
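# --- Illustrative additional check (not part of the original test suite) -----
# A minimal sanity-test sketch: after fitting, ``centroids_`` should contain
# one centroid per class with one coordinate per feature. The test name is
# hypothetical and relies only on the documented NearestCentroid attributes.
def test_centroid_shape_sketch():
    clf = NearestCentroid()
    clf.fit(iris.data, iris.target)
    n_classes = np.unique(iris.target).size
    assert_equal(clf.centroids_.shape, (n_classes, iris.data.shape[1]))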
| bsd-3-clause |
RachitKansal/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the position of the data points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. By contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here, a 10-nearest-neighbors graph
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
mattgiguere/scikit-learn | sklearn/cross_validation.py | 3 | 57208 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
    Due to the high number of iterations, which grows combinatorially with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_folds)``; the last one
    contains the remainder.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
    all assigned the same label.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
        Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
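# Worked example for _validate_shuffle_split (illustrative): with n=10 and
# test_size=0.25 the float branch gives n_test = ceil(0.25 * 10) = 3 and,
# since train_size is None, n_train = 10 - 3 = 7, so the helper returns
# (7, 3). Passing train_size=0.5 as well would instead give
# n_train = floor(0.5 * 10) = 5 and a return value of (5, 3).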
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
                # Complete the folds by randomly assigning the missing indices
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
        This generator must place every element in a test set exactly once
        (i.e. the test sets must form a partition of the data); otherwise,
        a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, X.shape[0]):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
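# Illustrative behaviour of the helper above (comments only, not executed):
#     _check_is_partition(np.array([2, 0, 1]), 3) -> True   (a permutation)
#     _check_is_partition(np.array([0, 0, 1]), 3) -> False  (index 2 never hit)
#     _check_is_partition(np.array([0, 1]), 3)    -> False  (wrong length)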
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
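# Illustrative usage sketch for cross_val_score (kept as comments; this is not
# a doctest from the original module). It assumes a classifier and a labelled
# dataset, in which case an integer ``cv`` maps to StratifiedKFold as
# described in the docstring above:
#     >>> from sklearn.datasets import load_iris
#     >>> from sklearn.svm import SVC
#     >>> iris = load_iris()
#     >>> scores = cross_val_score(SVC(), iris.data, iris.target, cv=5)
#     >>> scores.shape
#     (5,)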
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
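# Illustrative sketch (not part of the original module): what _fit_and_score
# returns with the default flags, on a tiny hand-made split. The toy data and
# the DecisionTreeClassifier are assumptions for demonstration only.
def _example_fit_and_score():
    import numpy as np
    from sklearn.tree import DecisionTreeClassifier
    X = np.array([[0.], [1.], [2.], [3.]])
    y = np.array([0, 0, 1, 1])
    train, test = np.array([0, 1, 2]), np.array([3])
    clf = DecisionTreeClassifier(random_state=0)
    scorer = check_scoring(clf, scoring='accuracy')
    # With return_train_score/return_parameters left at False the result is
    # [test_score, n_test_samples, scoring_time].
    return _fit_and_score(clf, X, y, scorer, train, test,
                          verbose=0, parameters=None, fit_params=None)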
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
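# Illustrative sketch (not part of the original module): how a precomputed
# square kernel matrix is sliced inside _safe_split. Rows follow the requested
# indices and columns follow the training indices, i.e. K[np.ix_(test, train)].
def _example_kernel_slicing():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(6, 3)
    K = np.dot(X, X.T)                    # 6 x 6 precomputed kernel
    train, test = np.arange(4), np.array([4, 5])
    K_train = K[np.ix_(train, train)]     # (4, 4): used when fitting
    K_test = K[np.ix_(test, train)]       # (2, 4): used when scoring
    return K_train.shape, K_test.shape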
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
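# Illustrative sketch (not part of the original module): _shuffle permutes y
# only within groups that share the same label, so values never cross groups.
def _example_shuffle_within_labels():
    import numpy as np
    rng = np.random.RandomState(0)
    y = np.array([10, 11, 12, 20, 21, 22])
    labels = np.array([0, 0, 0, 1, 1, 1])
    # The first three entries stay a permutation of [10, 11, 12] and the last
    # three a permutation of [20, 21, 22].
    return _shuffle(y, labels, rng)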
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
        None, in which case 3-fold is used, or another object that
        will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
    classifier : boolean, optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
return _check_cv(cv, X=X, y=y, classifier=classifier)
def _check_cv(cv, X=None, y=None, classifier=False):
# This exists for internal use while indices is being deprecated.
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
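# Illustrative sketch (not part of the original module): what an integer cv is
# resolved into by _check_cv, depending on the target and the classifier flag.
def _example_check_cv():
    import numpy as np
    X = np.zeros((6, 2))
    y_clf = np.array([0, 1, 0, 1, 0, 1])                 # binary target
    y_reg = np.array([.1, .2, .3, .4, .5, .6])           # continuous target
    cv_clf = _check_cv(3, X, y_clf, classifier=True)     # -> StratifiedKFold
    cv_reg = _check_cv(3, X, y_reg, classifier=False)    # -> KFold
    return type(cv_clf).__name__, type(cv_reg).__name__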
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
        Labels constrain the permutation to groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
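# Illustrative sketch (not part of the original module): the p-value arithmetic
# used by permutation_test_score, worked out on hand-made permutation scores.
def _example_permutation_pvalue():
    import numpy as np
    score = 0.90
    permutation_scores = np.array([0.48, 0.52, 0.55, 0.91])
    n_permutations = len(permutation_scores)
    # One permuted score (0.91) reaches the true score, so the p-value is
    # (1 + 1) / (4 + 1) = 0.4.
    return (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)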
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
| bsd-3-clause |
NelisVerhoef/scikit-learn | sklearn/metrics/tests/test_classification.py | 83 | 49782 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
"""Test handling of explicit additional (not in input) labels to PRF
"""
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
"""Test a subset of labels may be requested for PRF"""
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
    # Duplicate values with precision-recall require different processing
    # than computing the AUC of a ROC, because the precision-recall curve
    # is a decreasing curve.
    # The following situation corresponds to a perfect
    # test statistic: the average_precision_score should be 1.
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here, if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've correctly
    # sorted our classifications. But in fact the first two values have
    # the same score (0.5), and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, 0.24],
[-2.36, -0.79, -0.27, 0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
jimsrc/seatos | shared_lib/shared_funcs_ii.py | 1 | 31654 | from numpy import *
from pylab import *
from datetime import datetime, time, timedelta
import numpy as np
import console_colors as ccl
from scipy.io.netcdf import netcdf_file
from ShiftTimes import *
import os
import matplotlib.patches as patches
import matplotlib.transforms as transforms
#from read_NewTable import tshck, tini_icme, tend_icme, tini_mc, tend_mc, n_icmes, MCsig
#from z_expansion_gulisano import z as z_exp
def flags2nan(VAR, FLAG):
cond = VAR < FLAG
VAR = np.array(VAR)
VAR[~cond] = np.nan
return VAR
def date_to_utc(fecha):
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
time = (fecha - utc).total_seconds()
return time
def dates_from_omni(t):
time = []
n = len(t)
for i in range(n):
yyyy = t[i][0]
mm = t[i][1]
dd = t[i][2]
HH = t[i][3]
MM = t[i][4]
SS = t[i][5]
uSS = t[i][6]
time += [datetime(yyyy, mm, dd, HH, MM, SS, uSS)]
return time
def utc_from_omni(file):
t = np.array(file.variables['time'].data)
dates = dates_from_omni(t)
n = len(dates)
time = np.zeros(n)
for i in range(n):
time[i] = date_to_utc(dates[i])
return time
def selecc_data(data, tshk):
time = data[0] #[s] utc sec
rate = data[1]
    day = 86400. # [sec]
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
tshk_utc = (tshk - utc).total_seconds()
    ti = tshk_utc - 10.*day # [sec] utc
tf = tshk_utc + 30.*day
cond = (time > ti) & (time < tf)
time = (time[cond] - tshk_utc) / day # [days] since shock
rate = rate[cond]
return (time, rate)
def selecc_window(data, tini, tend):
time = data[0] #[s] utc sec
y = data[1]
    day = 86400. # [sec]
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
tini_utc = (tini - utc).total_seconds() # [s] utc sec
tend_utc = (tend - utc).total_seconds() # [s] utc sec
ti = tini_utc # [seg] utc
tf = tend_utc
cond = (time > ti) & (time < tf)
time = (time[cond] - tini_utc) / day # [days] since 'ti'
y = y[cond]
return (time, y)
def enoughdata(var, fgap):
n = len(var)
ngood = len(find(~isnan(var)))
    fdata = 1.*ngood/n # fraction of data without gaps
if fdata>=(1.-fgap):
return True
else:
return False
def averages_and_std(n_icmes, t_shck, ti_icme, dTday, nbin, t_utc, VAR, fgap):
day = 86400.
nok=0; nbad=0
adap = []
for i in range(n_icmes):
dT = (ti_icme[i] - t_shck[i]).total_seconds()/day # [day]
if dT>dTday:
dt = dT/nbin
t, var = selecc_window(
[t_utc, VAR],
t_shck[i], ti_icme[i]
)
            if enoughdata(var, fgap): # require that more than 80% of the data are NOT gaps
adap += [adaptar(nbin, dt, t, var)]
nok +=1
else:
continue
else:
print " i:%d ---> Este evento es muy chico!, dT/day:%g" % (i, dT)
nbad +=1
VAR_adap = zeros(nbin*nok).reshape(nok, nbin)
for i in range(nok):
VAR_adap[i,:] = adap[i][1]
VAR_avrg = zeros(nbin)
VAR_std = zeros(nbin)
ndata = zeros(nbin)
for i in range(nbin):
cond = ~isnan(VAR_adap.T[i,:])
        ndata[i] = len(find(cond)) # number of data points != flag
        VAR_avrg[i] = mean(VAR_adap.T[i,cond]) # average over the values that are not flagged
        VAR_std[i] = std(VAR_adap.T[i,cond]) # std of the same data set
tnorm = adap[0][0]
return [nok, nbad, tnorm, VAR_avrg, VAR_std, ndata]
def adaptar(n, dt, t, r):
    #n = int(5./dt) # number of points over the whole plotting interval
tt = zeros(n)
rr = zeros(n)
for i in range(n):
tmin = i*dt
tmax = (i+1.)*dt
cond = (t>tmin) & (t<tmax)
tt[i] = mean(t[cond])
rr[i] = mean(r[cond])
return [tt/(n*dt), rr]
def adaptar(nwndw, dT, n, dt, t, r):
    #n = int(5./dt) # number of points over the whole plotting interval
tt = zeros(n)
rr = zeros(n)
    _nbin_ = n/(1+nwndw[0]+nwndw[1]) # number of bins in the sheath
for i in range(n):
tmin = (i-nwndw[0]*_nbin_)*dt
tmax = tmin + dt
cond = (t>tmin) & (t<tmax)
tt[i] = mean(t[cond])#; print "tt:", t[i]; pause(1)
rr[i] = mean(r[cond])
    return [tt/dT, rr] # time normalized by the sheath duration
def adaptar_ii(nwndw, dT, n, dt, t, r, fgap):
#n = int(5./dt) # nro de puntos en todo el intervalo de ploteo
tt = zeros(n)
rr = zeros(n)
    _nbin_ = n/(1+nwndw[0]+nwndw[1]) # number of bins in the sheath/MC
    cc = (t>0.) & (t<dT) # sheath/MC interval
    enough = enoughdata(r[cc], fgap) # [bool] True if more than 80% of the data is good.
    if not(enough): rr = nan*ones(n) # if there is not enough data, this event does not contribute
for i in range(n):
tmin = (i-nwndw[0]*_nbin_)*dt
tmax = tmin + dt
cond = (t>tmin) & (t<tmax)
tt[i] = mean(t[cond])#; print "tt:", t[i]; pause(1)
if enough:
            cc = ~isnan(r[cond]) # don't forget to filter out the gaps
rr[i] = mean(r[cond][cc])
    return enough, [tt/dT, rr] # time normalized by the sheath/MC/etc. duration
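# Illustrative sketch (not part of the original module): adaptar_ii re-bins a
# signal onto `n` bins covering one window before and one after the event, and
# returns times normalized by the event duration dT. The synthetic sine signal
# below is an assumption for demonstration only.
def _example_adaptar_ii():
    import numpy as np
    t = np.linspace(-1.0, 2.0, 300)   # one window before + event + one after
    r = np.sin(t)
    enough, (tnorm, rbin) = adaptar_ii(nwndw=[1, 1], dT=1.0, n=30,
                                       dt=3.0/30, t=t, r=r, fgap=0.2)
    return enough, tnorm.shape, rbin.shape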
def selecc_window_ii(nwndw, data, tini, tend):
time = data[0] #[s] utc sec
y = data[1]
day = 86400. # [seg]
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
tini_utc = (tini - utc).total_seconds() # [s] utc sec
tend_utc = (tend - utc).total_seconds() # [s] utc sec
dt = tend_utc - tini_utc
    ti = tini_utc - nwndw[0]*dt # [sec] utc
tf = tend_utc + nwndw[1]*dt
cond = (time > ti) & (time < tf)
time = (time[cond] - tini_utc) / day # [days] since 'ti'
y = y[cond]
return (time, y)
def averages_and_std_ii(nwndw,
SELECC, #MCsig, MCwant,
n_icmes, tini, tend, dTday, nbin, t_utc, VAR):
day = 86400.
nok=0; nbad=0
adap = []
for i in range(n_icmes):
dT = (tend[i] - tini[i]).total_seconds()/day # [day]
if ((dT>dTday) & SELECC[i]):# (MCsig[i]>=MCwant)):
dt = dT*(1+nwndw[0]+nwndw[1])/nbin
t, var = selecc_window_ii(
                nwndw, # number of windows backwards and forwards
[t_utc, VAR],
tini[i], tend[i]
)
            adap += [adaptar(nwndw, dT, nbin, dt, t, var)] # re-bin using 'dt' as the new bin width
nok +=1
else:
print " i:%d ---> Filtramos este evento!, dT/day:%g" % (i, dT)
nbad +=1
VAR_adap = zeros(nbin*nok).reshape(nok, nbin)
for i in range(nok):
VAR_adap[i,:] = adap[i][1]
VAR_avrg = zeros(nbin)
VAR_medi = zeros(nbin)
VAR_std = zeros(nbin)
ndata = zeros(nbin)
for i in range(nbin):
cond = ~isnan(VAR_adap.T[i,:])
        ndata[i] = len(find(cond))                  # number of data points != flag
        VAR_avrg[i] = mean(VAR_adap.T[i,cond])      # mean over the non-flagged values
        VAR_medi[i] = median(VAR_adap.T[i,cond])    # median of the non-flagged values
        VAR_std[i] = std(VAR_adap.T[i,cond])        # std of the same set of values
tnorm = adap[0][0]
return [nok, nbad, tnorm, VAR_avrg, VAR_medi, VAR_std, ndata]
def mvs_for_each_event(VAR_adap, nbin, nwndw, Enough):
nok = size(VAR_adap, axis=0)
    mvs = np.zeros(nok)                             # mean values, one per event
    binsPerTimeUnit = nbin/(1+nwndw[0]+nwndw[1])    # number of bins per time unit
    start = nwndw[0]*binsPerTimeUnit                # the MC starts at this bin
#print " ----> binsPerTimeUnit: ", binsPerTimeUnit
#print " ----> nok: ", nok
#print " ----> VAR_adap.shape: ", VAR_adap.shape
#print " ----> VAR_adap: \n", VAR_adap
#raw_input()
for i in range(nok):
aux = VAR_adap[i, start:start+binsPerTimeUnit] # (*)
cc = ~isnan(aux) # pick good-data only
#if len(find(cc))>1:
        if Enough[i]:   # only report the events that have *enough data*
print ccl.G
print "id %d/%d: "%(i+1, nok), aux[cc]
print ccl.W
mvs[i] = np.mean(aux[cc])
else:
mvs[i] = np.nan
    #(*): this is the time series (of this variable) for event "i"
pause(1)
return mvs
def diff_dates(tend, tini):
n = len(tend)
diffs = np.nan*np.ones(n)
for i in range(n):
        ok = type(tend[i]) == type(tini[i]) == datetime     # both must be dates!
if ok:
diffs[i] = (tend[i] - tini[i]).total_seconds()
else:
diffs[i] = np.nan
return diffs #[sec]
def write_variable(fout, varname, dims, var, datatype, comments):
dummy = fout.createVariable(varname, datatype, dims)
dummy[:] = var
dummy.units = comments
def calc_beta(Temp, Pcc, B):
    # We take the OMNI definition of plasma beta, from:
    # http://pamela.roma2.infn.it/index.php
# Beta = [(4.16*10**-5 * Tp) + 5.34] * Np/B**2 (B in nT)
#
beta = ((4.16*10**-5 * Temp) + 5.34) * Pcc/B**2
return beta
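# Worked example of the formula above (hypothetical solar-wind values, not
# taken from the data files used in this script):
#   calc_beta(1e5, 5., 5.)   # Tp = 1e5 K, Np = 5 #/cc, B = 5 nT  ->  ~1.9
# i.e. a plasma beta of order unity, as expected for the ambient solar wind.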
def thetacond(ThetaThres, ThetaSh):
if ThetaThres<=0.:
print ccl.Rn + ' ----> BAD WANG FILTER!!: ThetaThres<=0.'
        print ' ----> Exiting...' + ccl.Rn
raise SystemExit
#return ones(len(ThetaSh), dtype=bool)
else:
return (ThetaSh > ThetaThres)
def wangflag(ThetaThres):
if ThetaThres<0:
return 'NaN'
else:
return str(ThetaThres)
def makefig(medVAR, avrVAR, stdVAR, nVAR, tnorm,
SUBTITLE, YLIMS, YLAB, fname_fig):
fig = figure(1, figsize=(13, 6))
ax = fig.add_subplot(111)
ax.plot(tnorm, avrVAR, 'o-', color='black', markersize=5, label='mean')
ax.plot(tnorm, medVAR, 'o-', color='red', alpha=.5, markersize=5, markeredgecolor='none', label='median')
inf = avrVAR + stdVAR/np.sqrt(nVAR)
sup = avrVAR - stdVAR/np.sqrt(nVAR)
ax.fill_between(tnorm, inf, sup, facecolor='gray', alpha=0.5)
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=1.0, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
ax.legend(loc='upper right')
ax.grid()
ax.set_ylim(YLIMS)
TITLE = SUBTITLE
ax.set_title(TITLE)
ax.set_xlabel('time normalized to MC passage time [1]', fontsize=14)
ax.set_ylabel(YLAB, fontsize=20)
savefig(fname_fig, format='png', dpi=180, bbox_inches='tight')
close()
class general:
def __init__(self):
name='name'
class events_mgr:
def __init__(self, gral, FILTER, CUTS, bd, nBin, fgap, tb, z_exp):
#self.fnames = fnames
self.data_name = gral.data_name
self.FILTER = FILTER
self.CUTS = CUTS
self.bd = bd
self.nBin = nBin
self.fgap = fgap
self.tb = tb
self.z_exp = z_exp
self.dir_plots = gral.dirs['dir_plots']
self.dir_ascii = gral.dirs['dir_ascii']
self.f_sc = netcdf_file(gral.fnames['ACE'], 'r')
self.f_events = netcdf_file(gral.fnames['table_richardson'], 'r')
print " -------> archivos input leidos!"
    def run_all(self):
        #----- event selection
        self.filter_events()
        print "\n ---> event filtering (n:%d): OK\n" % (self.n_SELECC)
        #----- load the data and the "omni" time shifts
        self.load_data_and_timeshift()
        #----- rebinning and averages
        self.rebine_and_avr()
        #----- make the plots
        self.make_plots()
        #----- "stuff" files
        self.build_params_file()
def rebine_and_avr(self):
"""def avrs_and_stds(nwndw,
SELECC, #MCsig, MCwant,
n_icmes, tini, tend, dTday, nbin, t_utc, VARS, fgap):"""
nvars = self.nvars #len(VARS)
n_icmes = self.tb.n_icmes
bd = self.bd
VARS = self.VARS
nbin = self.nBin['total']
nwndw = [self.nBin['before'], self.nBin['after']]
day = 86400.
"""print " ---> nbin: ", nbin
print " ---> t_utc[0]: ", self.t_utc[0]
print " ---> t_utc[-1]: ", self.t_utc[-1]
print " ---> fgap: ", self.fgap
print " ---> VARS[-1][1]: ", self.VARS[-1][1]
print " ---> nwndw: ", nwndw
print " ---> dTday: ", self.CUTS['dTday']
print " ---> tini[0]: ", bd.tini[0]
print " ---> tend[-110]: ", bd.tend[-110]"""
#raw_input()
        ADAP = []       # collection of several 'adap' outputs (one per variable)
        # loop over the events:
        nok=0; nbad=0;
        nEnough = np.zeros(nvars)
        Enough = np.zeros(n_icmes*nvars, dtype=bool).reshape(n_icmes, nvars)
        Enough = []
        nnn = 0     # number of events that pass the a-priori filter
        #---- we want a list of the event IDs that will be included in each average :-)
IDs = {}
for j in range(nvars):
varname = VARS[j][1]
IDs[varname] = []
for i in range(n_icmes):
#nok=0; nbad=0;
            ok=False
            try: # not all elements of 'tend' are dates (some events have no defined date)
                dT = (bd.tend[i] - bd.tini[i]).total_seconds()/day  # [day]
                ok = True
            except:
                continue    # skip to the next event 'i'
            #np.set_printoptions(4)     # number of digits to print for numpy arrays
            if (ok & self.SELECC[i]):# (MCsig[i]>=MCwant)): ---FILTER--- (*1)
                nnn += 1
                print ccl.Gn + " id:%d ---> dT/day:%g" % (i, dT) + ccl.W
                nok +=1
                Enough += [ np.zeros(nvars, dtype=bool) ]   # all False by default
                # loop over the variables:
for j in range(nvars):
varname = VARS[j][1]
dt = dT*(1+nwndw[0]+nwndw[1])/nbin
t, var = selecc_window_ii(
                        nwndw,              # plotting range
[self.t_utc, VARS[j][0]],
bd.tini[i], bd.tend[i]
)
                    # rebin using 'dt' as the new bin width
out = adaptar_ii(nwndw, dT, nbin, dt, t, var, self.fgap)
                    enough = out[0]     # True: data with less than 100*fgap % of gaps
Enough[nok-1][j] = enough
ADAP += [ out[1] ]
#print " out01: ", out[1]; raw_input()
if enough:
IDs[varname] += [i]
nEnough[j] += 1
else:
print ccl.Rn + " id:%d ---> dT/day:%g" % (i, dT) + ccl.W
nbad +=1
print " ----> len.ADAP: %d" % len(ADAP)
Enough = np.array(Enough)
stuff = []
        #nok = len(ADAP)/nvars  # (*)
        # (*) 'ADAP' holds 'nvars' entries per event that passed the filter in (*1)
for j in range(nvars):
print ccl.On + " -------> procesando: %s" % VARS[j][3] + " (%d/%d)" % (j+1,nvars)
print " nEnough/nok/(nok+nbad): %d/%d/%d " % (nEnough[j], nok, nok+nbad) + ccl.W
VAR_adap = np.zeros((nok, nbin)) # perfiles rebineados (*)
# (*): uno de estos por variable
# recorro los 'nok' eventos q pasaron el filtro de arriba:
for i in range(nok):
VAR_adap[i,:] = ADAP[i*nvars+j][1] # valores rebineados de la variable "j" para el evento "i"
# valores medios de esta variable para c/evento
avrVAR_adap = mvs_for_each_event(VAR_adap, nbin, nwndw, Enough.T[j])
print " ---> (%d/%d) avrVAR_adap[]: \n" % (j+1,nvars), avrVAR_adap
VAR_avrg = np.zeros(nbin)
VAR_avrgNorm = np.zeros(nbin)
VAR_medi = np.zeros(nbin)
VAR_std = np.zeros(nbin)
ndata = np.zeros(nbin)
for i in range(nbin):
                cond = ~np.isnan(VAR_adap.T[i,:])   # drop events that contribute no data in this bin
                ndata[i] = len(find(cond))          # number of data points != nan
                VAR_avrg[i] = np.mean(VAR_adap.T[i,cond])   # mean over the non-flagged values
                VAR_avrgNorm[i] = np.mean(VAR_adap.T[i,cond]/avrVAR_adap[cond])
                VAR_medi[i] = np.median(VAR_adap.T[i,cond]) # median of the non-flagged values
                VAR_std[i] = np.std(VAR_adap.T[i,cond])     # std of the same set of values
            #--- compute the normalized profile for each variable
#ii = nwndw[0]*binsPerTimeUnit
#AvrInWndw = mean(VAR_avrg[ii:ii+binsPerTimeUnit])
tnorm = ADAP[0][0]
stuff += [[nok, nbad, tnorm, VAR_avrg, VAR_medi, VAR_std, ndata, avrVAR_adap]]
#return stuff, nEnough, Enough, IDs
self.out = OUT = {}
OUT['dVARS'] = stuff
OUT['nEnough'] = nEnough
OUT['Enough'] = Enough
OUT['IDs'] = IDs
OUT['tnorm'] = OUT['dVARS'][0][2]
def load_data_and_timeshift(self):
if self.data_name=='ACE':
self.load_data_ACE()
elif self.data_name=='McMurdo':
self.load_data_McMurdo()
else:
print " --------> BAD 'self.data_name'!!!"
print " exiting.... "
raise SystemExit
def load_data_McMurdo(self):
tb = self.tb
nBin = self.nBin
bd = self.bd
day = 86400.
def load_data_ACE(self):
tb = self.tb
nBin = self.nBin
bd = self.bd
day = 86400.
#----------------------------------------------------------
print " leyendo tiempo..."
t_utc = utc_from_omni(self.f_sc)
print " Ready."
        #++++++++++++++++++++ BOUNDARY (TIME-SHIFT) CORRECTION ++++++++++++++++
        # IMPORTANT:
        # Only valid for the "63 events" (MCflag='2', and visible from ACE)
        # NOTE: the shock jumps come out sharper with True.
if self.FILTER['CorrShift']:
ShiftCorrection(ShiftDts, tb.tshck)
ShiftCorrection(ShiftDts, tb.tini_icme)
ShiftCorrection(ShiftDts, tb.tend_icme)
ShiftCorrection(ShiftDts, tb.tini_mc)
ShiftCorrection(ShiftDts, tb.tend_mc)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
B = np.array(self.f_sc.variables['Bmag'].data)
Vsw = np.array(self.f_sc.variables['Vp'].data)
Temp = np.array(self.f_sc.variables['Tp'].data)
Pcc = np.array(self.f_sc.variables['Np'].data)
rmsB = np.array(self.f_sc.variables['dBrms'].data)
alphar = np.array(self.f_sc.variables['Alpha_ratio'].data)
beta = calc_beta(Temp, Pcc, B)
rmsBoB = rmsB/B
print " -------> variables leidas!"
#------------------------------------ VARIABLES
self.t_utc = t_utc
self.VARS = VARS = []
        # variable, file-name tag, vertical limits, ylabel
VARS += [[B, 'B', [5., 18.], 'B [nT]']]
VARS += [[Vsw, 'V', [380., 650.], 'Vsw [km/s]']]
VARS += [[rmsBoB, 'rmsBoB', [0.01, 0.2], 'rms($\hat B$/|B|) [1]']]
VARS += [[beta, 'beta', [0.001, 5.], '$\\beta$ [1]']]
VARS += [[Pcc, 'Pcc', [2, 17.], 'proton density [#/cc]']]
VARS += [[Temp, 'Temp', [1e4, 4e5], 'Temp [K]']]
VARS += [[alphar, 'AlphaRatio', [1e-3, 0.1], 'alpha ratio [1]']]
self.nvars = len(VARS)
#---------
        #nbin = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime'] # [1] number of bins wanted in the mean profile
        #fgap = 0.2 # fraction of gaps tolerated
        # nEnough: number of events contributing good data in 80% of the window
self.aux = aux = {}
aux['SELECC'] = self.SELECC
"""aux['BETW1998_2006'] = BETW1998_2006
aux['DURATION'] = DURATION
if wang_filter: aux['ThetaCond'] = ThetaCond
if vsw_filter: aux['SpeedCond'] = SpeedCond
if z_filter_on: aux['z_cond'] = z_cond
aux['dt_mc'] = dt_mc
aux['dt_sh'] = dt_sh"""
        #---- OUTPUT:
        #self.VARS = VARS
        #self.out = out
        #self.aux = aux
    #---- generate figures and ASCII files of the mean/median profiles
def make_plots(self):
nBin = self.nBin
fgap = self.fgap
MCwant = self.FILTER['MCwant']
#dt_mc = self.aux['dt_mc']
#dt_sh = self.aux['dt_sh']
ThetaThres = self.CUTS['ThetaThres']
v_lo = self.CUTS['v_lo']
v_hi = self.CUTS['v_hi']
z_lo = self.CUTS['z_lo']
z_hi = self.CUTS['z_hi']
        nbin = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime'] # [1] number of bins wanted in the mean profile
        #-------------------- prefixes:
        # prefix for the Wang filter:
        #WangFlag = wangflag(ThetaThres) #'NaN' #wangflag(ThetaThres)
if self.FILTER['wang']:
WangFlag = str(ThetaThres)
else:
WangFlag = 'NaN'
        # general prefix for the figure names:
if self.FILTER['CorrShift']:
prexShift = 'wShiftCorr'
else:
prexShift = 'woShiftCorr'
        # z-expansion filter
        if not(self.FILTER['z_filter_on']):
            z_lo = z_hi = 0.0   # these values mean no filtering in z
        # filter by Vmc (the MC speed)
        if not(self.FILTER['vsw_filter']):
            v_lo = v_hi = 0.0   # these values mean no filtering in Vmc
#-------------------------------
        # generic names...
DIR_FIGS = '%s/MCflag%s/%s' % (self.dir_plots, MCwant['alias'], prexShift)
DIR_ASCII = '%s/MCflag%s/%s' % (self.dir_ascii, MCwant['alias'], prexShift)
os.system('mkdir -p %s' % DIR_FIGS)
os.system('mkdir -p %s' % DIR_ASCII)
        print ccl.On + " -------> creating: %s" % DIR_FIGS + ccl.W
        print ccl.On + " -------> creating: %s" % DIR_ASCII + ccl.W
FNAMEs = 'MCflag%s_%dbefore.%dafter_fgap%1.1f' % (MCwant['alias'], nBin['before'], nBin['after'], fgap)
FNAMEs += '_Wang%s' % (WangFlag)
FNAMEs += '_vlo.%03.1f.vhi.%04.1f' % (v_lo, v_hi)
FNAMEs += '_zlo.%2.2f.zhi.%2.2f' % (z_lo, z_hi)
FNAME_ASCII = '%s/%s' % (DIR_ASCII, FNAMEs)
FNAME_FIGS = '%s/%s' % (DIR_FIGS, FNAMEs)
fname_nro = DIR_ASCII+'/'+'n.events_'+FNAMEs+'.txt'
fnro = open(fname_nro, 'w')
#--------------------------------------------------------------------------------
nvars = len(self.VARS)
for i in range(nvars):
fname_fig = '%s_%s.png' % (FNAME_FIGS, self.VARS[i][1])
print ccl.Rn+ " ------> %s" % fname_fig
varname = self.VARS[i][1]
ylims = self.VARS[i][2]
ylabel = self.VARS[i][3]
mediana = self.out['dVARS'][i][4]
average = self.out['dVARS'][i][3]
std_err = self.out['dVARS'][i][5]
            nValues = self.out['dVARS'][i][6]   # number of good values contributing data
#binsPerTimeUnit = nbin #nbin/(1+nbefore+nafter)
N_selec = self.out['dVARS'][i][0]
N_final = self.out['nEnough'][i] #nEnough[i]
SUBTITLE = '# of selected events: %d \n\
events w/80%% of data: %d \n\
bins per time unit: %d \n\
MCflag: %s \n\
WangFlag: %s' % (N_selec, N_final, nBin['bins_per_utime'], MCwant['alias'], WangFlag)
makefig(mediana, average, std_err, nValues, self.out['tnorm'], SUBTITLE,
ylims, ylabel, fname_fig)
fdataout = '%s_%s.txt' % (FNAME_ASCII, self.VARS[i][1])
dataout = np.array([self.out['tnorm'] , mediana, average, std_err, nValues])
print " ------> %s\n" % fdataout + ccl.W
np.savetxt(fdataout, dataout.T, fmt='%12.5f')
            #-------- save the number of selected events for this variable
            line = '%s %d %d\n' % (varname, N_final, N_selec)
            fnro.write(line)
        print ccl.Rn + " --> number of selected events: " + fname_nro + ccl.W
fnro.close()
        #--- outputs (apart from the .png files)
self.DIR_ASCII = DIR_ASCII
self.FNAMEs = FNAMEs
    #---- builds a file containing quantities for the selected events:
    #     - mean values of the observables (B, Vsw, Temp, beta, etc.)
    #     - the event IDs
    #     - duration of the MCs and the sheaths
def build_params_file(self):
DIR_ASCII = self.DIR_ASCII
FNAMEs = self.FNAMEs
#---------------------------------------------- begin: NC_FILE
print "\n**************************************** begin: NC_FILE"
        #------- generate a record of the IDs of the events
        #        that entered the averages.
        #        Note: one record per variable.
        fname_out = DIR_ASCII+'/'+'_stuff_'+FNAMEs+'.nc' #'./test.nc'
        fout = netcdf_file(fname_out, 'w')
        print "\n ----> generating: %s\n" % fname_out
IDs = self.out['IDs']
for i in range(len(self.VARS)):
varname = self.VARS[i][1]
print " ----> " + varname
n_events = len(IDs[varname])
dimname = 'nevents_'+varname
fout.createDimension(dimname, n_events)
prom = self.out['dVARS'][i][7]
cc = np.isnan(prom)
prom = prom[~cc]
dims = (dimname,)
write_variable(fout, varname, dims,
prom, 'd', 'average_values per event')
            #---------- IDs for this variable
ids = map(int, IDs[varname])
vname = 'IDs_'+varname
write_variable(fout, vname, dims, ids, 'i',
'event IDs that enter in this parameter average')
            #---------- duration of the structure
dtsh = np.zeros(len(ids))
dtmc = np.zeros(len(ids))
for i in range(len(ids)):
id = ids[i]
dtsh[i] = self.dt_sh[id]
dtmc[i] = self.dt_mc[id]
vname = 'dt_sheath_'+varname
write_variable(fout, vname, dims, dtsh, 'd', '[days]')
vname = 'dt_mc_'+varname
write_variable(fout, vname, dims, dtmc, 'd', '[days]')
fout.close()
print "**************************************** end: NC_FILE"
#---------------------------------------------- end: NC_FILE
def filter_events(self):
tb = self.tb
FILTER = self.FILTER
ThetaThres = self.CUTS['ThetaThres']
dTday = self.CUTS['dTday']
v_lo = self.CUTS['v_lo']
v_hi = self.CUTS['v_hi']
z_lo = self.CUTS['z_lo']
z_hi = self.CUTS['z_hi']
day = 86400.
        #------------------------------------ EVENTS' PARAMETERS
        #MCsig = array(f_events.variables['MC_sig'].data)   # 2,1,0: MC, rotation, irregular
        #Vnsh = array(f_events.variables['wang_Vsh'].data)  # normal speed of the shock
        ThetaSh = np.array(self.f_events.variables['wang_theta_shock'].data)    # orientation of the shock normal
        i_V = np.array(self.f_events.variables['i_V'].data)                     # ICME speed
#------------------------------------
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #++++++++++++++++++ begin: EVENT SELECTION +++++++++++++++++++++++++++
        #------- dates
        BETW1998_2006 = np.ones(tb.n_icmes, dtype=bool)
        for i in range(307, tb.n_icmes)+range(0, 26):
            BETW1998_2006[i]=False  # 'False' to exclude events
        #------- select the MCs by their catalogue label (lepping=2, etc.)
MC_FLAG = np.ones(tb.n_icmes, dtype=bool)
for i in range(tb.n_icmes):
MC_FLAG[i] = tb.MCsig[i] in FILTER['MCwant']['flags']
        #------- exclude events with 2 MCs
        EVENTS_with_2MCs= (26, 148, 259, 295)
        MCmultiple = FILTER['Mcmultiple'] #False #True to include multi-MC events
        MCmulti = np.ones(tb.n_icmes, dtype=bool)   # set to False for multi-MC events (yes, this is intended)
        if(~FILTER['Mcmultiple']):
            for i in EVENTS_with_2MCs:
                MCmulti[i] &= False
        #------- shock orientation (Wang catalogue)
if FILTER['wang']:
ThetaCond = thetacond(ThetaThres, ThetaSh)
#------- duration of sheaths
self.dt_mc = diff_dates(tb.tend_mc, tb.tini_mc)/day # [day]
self.dt_sh = diff_dates(tb.tini_mc, tb.tshck)/day # [day]
dt = diff_dates(self.bd.tend, self.bd.tini)/day
DURATION = dt > dTday # sheaths>0
#------- speed of icmes
if (FILTER['vsw_filter']) & (v_lo<v_hi):
SpeedCond = (i_V>=v_lo) & (i_V<v_hi)
#------- z expansion (a. gulisano)
z_exp = self.z_exp
if (FILTER['z_filter_on']) & (z_lo<z_hi):
z_cond = (z_exp>=z_lo) & (z_exp<z_hi)
        #------- combined filter
        SELECC = np.ones(tb.n_icmes, dtype=bool)
        SELECC &= BETW1998_2006     # stay within this range of years
        SELECC &= MCmulti           # multiple clouds
        SELECC &= MC_FLAG           # cloud-catalogue flag
        SELECC &= DURATION          # skip sheaths lasting ~1 hr; they only add noise
        if FILTER['wang']: SELECC &= ThetaCond      # close to 180 deg means the nose of the shock
        if FILTER['vsw_filter']: SELECC &= SpeedCond
        if FILTER['z_filter_on']: SELECC &= z_cond  # to disable this filter, comment out this line
"""print "+++ eventos +++++++++++++++++++++++++++++++++++++++"
for i in range(tb.n_icmes):
if SELECC[i]:
print i
raw_input()"""
self.SELECC = SELECC
self.n_SELECC = len(find(SELECC))
        #+++++++++++++++++ end: EVENT SELECTION ++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
if self.n_SELECC<=0:
print " --------> FATAL ERROR!!!: self.n_SELECC=<0"
print " exiting....... \n"
raise SystemExit
##
| mit |
treycausey/scikit-learn | examples/exercises/plot_iris_exercise.py | 8 | 1577 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
pl.figure(fig_num)
pl.clf()
pl.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=pl.cm.Paired)
# Circle out the test data
pl.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
pl.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
pl.pcolormesh(XX, YY, Z > 0, cmap=pl.cm.Paired)
pl.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
pl.title(kernel)
pl.show()
| bsd-3-clause |
wasade/qiime | qiime/make_otu_heatmap.py | 1 | 7171 | from __future__ import division
__author__ = "Dan Knights"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Dan Knights", "Greg Caporaso", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Dan Knights"
__email__ = "[email protected]"
import numpy as np
import matplotlib
matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import linkage
from skbio.tree import TreeNode
from skbio.diversity.beta import pw_distances
from qiime.parse import parse_newick, PhyloNode
from qiime.filter import filter_samples_from_otu_table
def get_overlapping_samples(map_rows, otu_table):
"""Extracts only samples contained in otu table and mapping file.
Returns: new_map_rows, new_otu_table
"""
map_sample_ids = zip(*map_rows)[0]
shared_ids = set(map_sample_ids) & set(otu_table.ids())
otu_table = filter_samples_from_otu_table(otu_table, shared_ids, -np.inf,
np.inf)
new_map = []
for sam_id in map_sample_ids:
if sam_id in shared_ids:
ix = map_sample_ids.index(sam_id)
new_map.append(map_rows[ix])
return new_map, otu_table
def extract_metadata_column(sample_ids, metadata, category):
"""Extracts values from the given metadata column"""
col_ix = metadata[1].index(category)
map_sample_ids = zip(*metadata[0])[0]
category_labels = []
for i, sample_id in enumerate(sample_ids):
if sample_id in map_sample_ids:
row_ix = map_sample_ids.index(sample_id)
entry = metadata[0][row_ix][col_ix]
category_labels.append(entry)
return category_labels
def get_order_from_categories(otu_table, category_labels):
"""Groups samples by category values; clusters within each group"""
category_labels = np.array(category_labels)
sample_order = []
for label in np.unique(category_labels):
label_ix = category_labels == label
selected = [s for (i, s) in zip(label_ix, otu_table.ids()) if i]
sub_otu_table = filter_samples_from_otu_table(otu_table, selected,
-np.inf, np.inf)
data = np.asarray(list(sub_otu_table.iter_data(axis='observation')))
label_ix_ix = get_clusters(data, axis='column')
sample_order += list(np.nonzero(label_ix)[0][np.array(label_ix_ix)])
return np.array(sample_order)
def get_order_from_tree(ids, tree_text):
"""Returns the indices that would sort ids by tree tip order"""
tree = parse_newick(tree_text, PhyloNode)
ordered_ids = []
for tip in tree.iterTips():
if tip.Name in ids:
ordered_ids.append(tip.Name)
return names_to_indices(ids, ordered_ids)
def make_otu_labels(otu_ids, lineages, n_levels=1):
"""Returns 'pretty' OTU labels: 'Lineage substring (OTU ID)'
Lineage substring includes the last n_levels lineage levels
"""
if len(lineages[0]) > 0:
otu_labels = []
for i, lineage in enumerate(lineages):
if n_levels > len(lineage):
otu_label = '%s (%s)' % (';'.join(lineage), otu_ids[i])
else:
otu_label = '%s (%s)' \
% (';'.join(lineage[-n_levels:]), otu_ids[i])
otu_labels.append(otu_label)
otu_labels = [lab.replace('"', '') for lab in otu_labels]
else:
otu_labels = otu_ids
return otu_labels
def names_to_indices(names, ordered_names):
"""Returns the indices that would sort 'names' like 'ordered_names'
"""
indices = []
names_list = list(names)
for ordered_name in ordered_names:
if ordered_name in names_list:
indices.append(names_list.index(ordered_name))
return np.array(indices)
def get_log_transform(otu_table):
"""Returns log10 of the data"""
if otu_table.nnz == 0:
raise ValueError('All values in the OTU table are zero!')
# take log of all values
def h(s_v, s_id, s_md):
return np.log10(s_v)
return otu_table.transform(h, axis='sample', inplace=False)
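# Minimal usage sketch (assumes the biom-format package's Table API; the tiny
# example table below is made up purely for illustration):
#
#   from biom.table import Table
#   t = Table(np.array([[1., 10.], [100., 1000.]]), ['O1', 'O2'], ['S1', 'S2'])
#   log_t = get_log_transform(t)    # entries become 0, 1, 2 and 3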
def get_clusters(x_original, axis='row'):
"""Performs UPGMA clustering using euclidean distances"""
x = x_original.copy()
if axis == 'column':
x = x.T
nr = x.shape[0]
row_dissims = pw_distances(x, ids=map(str, range(nr)), metric='euclidean')
# do upgma - rows
# Average in SciPy's cluster.hierarchy.linkage is UPGMA
linkage_matrix = linkage(row_dissims.condensed_form(), method='average')
tree = TreeNode.from_linkage_matrix(linkage_matrix, row_dissims.ids)
return [int(tip.name) for tip in tree.tips()]
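# Standalone sketch of the same UPGMA ordering idea using SciPy only
# (hypothetical random data; the function above additionally goes through
# scikit-bio's distance matrix and TreeNode classes):
#
#   from scipy.cluster.hierarchy import linkage, leaves_list
#   x = np.random.rand(5, 3)
#   order = leaves_list(linkage(x, method='average', metric='euclidean'))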
def get_fontsize(numrows):
"""Returns the fontsize needed to make text fit within each row.
"""
thresholds = [25, 50, 75, 100, 125]
sizes = [5, 4, 3, 2, 1.5, 1]
i = 0
while numrows > thresholds[i]:
i += 1
if i == len(thresholds):
break
return sizes[i]
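# For example: get_fontsize(20) == 5, get_fontsize(30) == 4 and
# get_fontsize(200) == 1 (anything above the last threshold gets the
# smallest size).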
def plot_heatmap(otu_table, row_labels, col_labels, filename, imagetype='pdf',
width=5, height=5, dpi=None, textborder=.25,
color_scheme='YlGn'):
"""Create a heatmap plot, save as a pdf by default.
'width', 'height' are in inches
'textborder' is the fraction of the figure allocated for the
tick labels on the x and y axes
color_scheme: choices can be found at
http://matplotlib.org/examples/color/colormaps_reference.html
"""
nrow = otu_table.length(axis='observation')
ncol = otu_table.length(axis='sample')
# determine appropriate font sizes for tick labels
row_fontsize = get_fontsize(nrow)
col_fontsize = get_fontsize(ncol)
# create figure and plot heatmap
fig, ax = plt.subplots(figsize=(width, height))
data = list(otu_table.iter_data(axis='observation'))
im = plt.imshow(np.fliplr(data), interpolation='nearest', aspect='auto',
cmap=color_scheme)
# imshow is offset by .5 for some reason
plt.xlim(-.5, ncol - .5)
plt.ylim(-.5, nrow - .5)
# add ticklabels to axes
plt.xticks(np.arange(ncol), col_labels[::-1], fontsize=col_fontsize,
rotation=90)
plt.yticks(np.arange(nrow), row_labels, fontsize=row_fontsize)
# turn off tick marks
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
# add space for tick labels
fig.subplots_adjust(left=textborder, bottom=textborder)
# create colorbar (legend) in its own axes so that tight_layout will
# respect both the heatmap and colorbar when it makes room for everything.
# code based on example in:
# http://matplotlib.org/users/tight_layout_guide.html
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", "5%", pad="3%")
cb = plt.colorbar(im, cax=cax)
# set colorbar tick labels to a reasonable value (normal is large)
for t in cb.ax.get_yticklabels():
t.set_fontsize(5)
plt.tight_layout()
fig.savefig(filename, format=imagetype, dpi=dpi)
| gpl-2.0 |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/cm.py | 11 | 11669 | """
This module provides a large set of colormaps, functions for
registering new colormaps and for getting a colormap by name,
and a mixin class for adding color mapping functionality.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cbook as cbook
from matplotlib._cm import datad
from matplotlib._cm import cubehelix
cmap_d = dict()
# reverse all the colormaps.
# reversed colormaps have '_r' appended to the name.
def _reverser(f):
def freversed(x):
return f(1 - x)
return freversed
def revcmap(data):
"""Can only handle specification *data* in dictionary format."""
data_r = {}
for key, val in six.iteritems(data):
if six.callable(val):
valnew = _reverser(val)
# This doesn't work: lambda x: val(1-x)
# The same "val" (the first one) is used
# each time, so the colors are identical
# and the result is shades of gray.
else:
# Flip x and exchange the y values facing x = 0 and x = 1.
valnew = [(1.0 - x, y1, y0) for x, y0, y1 in reversed(val)]
data_r[key] = valnew
return data_r
def _reverse_cmap_spec(spec):
"""Reverses cmap specification *spec*, can handle both dict and tuple
type specs."""
if 'red' in spec:
return revcmap(spec)
else:
revspec = list(reversed(spec))
if len(revspec[0]) == 2: # e.g., (1, (1.0, 0.0, 1.0))
revspec = [(1.0 - a, b) for a, b in revspec]
return revspec
def _generate_cmap(name, lutsize):
"""Generates the requested cmap from it's name *name*. The lut size is
*lutsize*."""
spec = datad[name]
# Generate the colormap object.
if 'red' in spec:
return colors.LinearSegmentedColormap(name, spec, lutsize)
else:
return colors.LinearSegmentedColormap.from_list(name, spec, lutsize)
LUTSIZE = mpl.rcParams['image.lut']
# Generate the reversed specifications ...
for cmapname in list(six.iterkeys(datad)):
spec = datad[cmapname]
spec_reversed = _reverse_cmap_spec(spec)
datad[cmapname + '_r'] = spec_reversed
# Precache the cmaps with ``lutsize = LUTSIZE`` ...
# Use datad.keys() to also add the reversed ones added in the section above:
for cmapname in six.iterkeys(datad):
cmap_d[cmapname] = _generate_cmap(cmapname, LUTSIZE)
locals().update(cmap_d)
# Continue with definitions ...
def register_cmap(name=None, cmap=None, data=None, lut=None):
"""
Add a colormap to the set recognized by :func:`get_cmap`.
It can be used in two ways::
register_cmap(name='swirly', cmap=swirly_cmap)
register_cmap(name='choppy', data=choppydata, lut=128)
In the first case, *cmap* must be a :class:`matplotlib.colors.Colormap`
instance. The *name* is optional; if absent, the name will
be the :attr:`~matplotlib.colors.Colormap.name` attribute of the *cmap*.
In the second case, the three arguments are passed to
the :class:`~matplotlib.colors.LinearSegmentedColormap` initializer,
and the resulting colormap is registered.
"""
if name is None:
try:
name = cmap.name
except AttributeError:
raise ValueError("Arguments must include a name or a Colormap")
if not cbook.is_string_like(name):
raise ValueError("Colormap name must be a string")
if isinstance(cmap, colors.Colormap):
cmap_d[name] = cmap
return
# For the remainder, let exceptions propagate.
if lut is None:
lut = mpl.rcParams['image.lut']
cmap = colors.LinearSegmentedColormap(name, data, lut)
cmap_d[name] = cmap
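# Usage sketch (the 'swirly' colormap below is made up for illustration):
#
#   from matplotlib.colors import LinearSegmentedColormap
#   swirly = LinearSegmentedColormap.from_list('swirly', ['white', 'blue'])
#   register_cmap(cmap=swirly)      # afterwards get_cmap('swirly') finds it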
def get_cmap(name=None, lut=None):
"""
Get a colormap instance, defaulting to rc values if *name* is None.
Colormaps added with :func:`register_cmap` take precedence over
built-in colormaps.
If *name* is a :class:`matplotlib.colors.Colormap` instance, it will be
returned.
If *lut* is not None it must be an integer giving the number of
entries desired in the lookup table, and *name* must be a
standard mpl colormap name with a corresponding data dictionary
in *datad*.
"""
if name is None:
name = mpl.rcParams['image.cmap']
if isinstance(name, colors.Colormap):
return name
if name in cmap_d:
if lut is None:
return cmap_d[name]
elif name in datad:
return _generate_cmap(name, lut)
else:
raise ValueError(
"Colormap %s is not recognized. Possible values are: %s"
% (name, ', '.join(cmap_d.keys())))
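# Usage sketch:
#
#   default = get_cmap()            # whatever rcParams['image.cmap'] names
#   jet64 = get_cmap('jet', lut=64) # built-in map resampled to 64 entries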
class ScalarMappable:
"""
This is a mixin class to support scalar data to RGBA mapping.
The ScalarMappable makes use of data normalization before returning
RGBA colors from the given colormap.
"""
def __init__(self, norm=None, cmap=None):
r"""
Parameters
----------
norm : :class:`matplotlib.colors.Normalize` instance
The normalizing object which scales data, typically into the
interval ``[0, 1]``.
cmap : str or :class:`~matplotlib.colors.Colormap` instance
The colormap used to map normalized data values to RGBA colors.
"""
self.callbacksSM = cbook.CallbackRegistry()
if cmap is None:
cmap = get_cmap()
if norm is None:
norm = colors.Normalize()
self._A = None
#: The Normalization instance of this ScalarMappable.
self.norm = norm
#: The Colormap instance of this ScalarMappable.
self.cmap = get_cmap(cmap)
#: The last colorbar associated with this ScalarMappable. May be None.
self.colorbar = None
self.update_dict = {'array': False}
@cbook.deprecated('1.3', alternative='the colorbar attribute')
def set_colorbar(self, im, ax):
"""set the colorbar and axes instances associated with mappable"""
self.colorbar = im
def to_rgba(self, x, alpha=None, bytes=False):
"""
Return a normalized rgba array corresponding to *x*.
In the normal case, *x* is a 1-D or 2-D sequence of scalars, and
the corresponding ndarray of rgba values will be returned,
based on the norm and colormap set for this ScalarMappable.
There is one special case, for handling images that are already
rgb or rgba, such as might have been read from an image file.
If *x* is an ndarray with 3 dimensions,
and the last dimension is either 3 or 4, then it will be
treated as an rgb or rgba array, and no mapping will be done.
If the last dimension is 3, the *alpha* kwarg (defaulting to 1)
will be used to fill in the transparency. If the last dimension
is 4, the *alpha* kwarg is ignored; it does not
replace the pre-existing alpha. A ValueError will be raised
if the third dimension is other than 3 or 4.
In either case, if *bytes* is *False* (default), the rgba
array will be floats in the 0-1 range; if it is *True*,
the returned rgba array will be uint8 in the 0 to 255 range.
Note: this method assumes the input is well-behaved; it does
not check for anomalies such as *x* being a masked rgba
array, or being an integer type other than uint8, or being
a floating point rgba array with values outside the 0-1 range.
"""
# First check for special case, image input:
try:
if x.ndim == 3:
if x.shape[2] == 3:
if alpha is None:
alpha = 1
if x.dtype == np.uint8:
alpha = np.uint8(alpha * 255)
m, n = x.shape[:2]
xx = np.empty(shape=(m, n, 4), dtype=x.dtype)
xx[:, :, :3] = x
xx[:, :, 3] = alpha
elif x.shape[2] == 4:
xx = x
else:
raise ValueError("third dimension must be 3 or 4")
if bytes and xx.dtype != np.uint8:
xx = (xx * 255).astype(np.uint8)
if not bytes and xx.dtype == np.uint8:
xx = xx.astype(float) / 255
return xx
except AttributeError:
# e.g., x is not an ndarray; so try mapping it
pass
# This is the normal case, mapping a scalar array:
x = ma.asarray(x)
x = self.norm(x)
x = self.cmap(x, alpha=alpha, bytes=bytes)
return x
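    # Usage sketch: scalars are passed through self.norm and then the colormap.
    #
    #   sm = ScalarMappable(norm=colors.Normalize(vmin=0., vmax=10.), cmap='gray')
    #   sm.to_rgba([0., 5., 10.])   # black, mid-gray and white, each with alpha 1.0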
def set_array(self, A):
'Set the image array from numpy array *A*'
self._A = A
self.update_dict['array'] = True
def get_array(self):
'Return the array'
return self._A
def get_cmap(self):
'return the colormap'
return self.cmap
def get_clim(self):
'return the min, max of the color limits for image scaling'
return self.norm.vmin, self.norm.vmax
def set_clim(self, vmin=None, vmax=None):
"""
set the norm limits for image scaling; if *vmin* is a length2
sequence, interpret it as ``(vmin, vmax)`` which is used to
support setp
ACCEPTS: a length 2 sequence of floats
"""
if (vmin is not None and vmax is None and
cbook.iterable(vmin) and len(vmin) == 2):
vmin, vmax = vmin
if vmin is not None:
self.norm.vmin = vmin
if vmax is not None:
self.norm.vmax = vmax
self.changed()
def set_cmap(self, cmap):
"""
set the colormap for luminance data
ACCEPTS: a colormap or registered colormap name
"""
cmap = get_cmap(cmap)
self.cmap = cmap
self.changed()
def set_norm(self, norm):
'set the normalization instance'
if norm is None:
norm = colors.Normalize()
self.norm = norm
self.changed()
def autoscale(self):
"""
Autoscale the scalar limits on the norm instance using the
current array
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale(self._A)
self.changed()
def autoscale_None(self):
"""
Autoscale the scalar limits on the norm instance using the
current array, changing only limits that are None
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale_None(self._A)
self.changed()
def add_checker(self, checker):
"""
Add an entry to a dictionary of boolean flags
that are set to True when the mappable is changed.
"""
self.update_dict[checker] = False
def check_update(self, checker):
"""
If mappable has changed since the last check,
return True; else return False
"""
if self.update_dict[checker]:
self.update_dict[checker] = False
return True
return False
def changed(self):
"""
Call this whenever the mappable is changed to notify all the
callbackSM listeners to the 'changed' signal
"""
self.callbacksSM.process('changed', self)
for key in self.update_dict:
self.update_dict[key] = True
| mit |
mikewiebe-ansible/ansible | hacking/cgroup_perf_recap_graph.py | 54 | 4384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2018, Matt Martz <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import argparse
import csv
from collections import namedtuple
try:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
except ImportError:
raise SystemExit('matplotlib is required for this script to work')
Data = namedtuple('Data', ['axis_name', 'dates', 'names', 'values'])
def task_start_ticks(dates, names):
item = None
ret = []
for i, name in enumerate(names):
if name == item:
continue
item = name
ret.append((dates[i], name))
return ret
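# For example, dates [0, 1, 2, 3] with names ['setup', 'setup', 'copy', 'copy']
# collapse to [(0, 'setup'), (2, 'copy')] -- one tick per task start.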
def create_axis_data(filename, relative=False):
x_base = None if relative else 0
axis_name, dummy = os.path.splitext(os.path.basename(filename))
dates = []
names = []
values = []
with open(filename) as f:
reader = csv.reader(f)
for row in reader:
if x_base is None:
x_base = float(row[0])
dates.append(mdates.epoch2num(float(row[0]) - x_base))
names.append(row[1])
values.append(float(row[3]))
return Data(axis_name, dates, names, values)
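# Each row of the CSV written by the cgroup_perf_recap callback is assumed to
# look like "<epoch seconds>,<task name>,<...>,<value>,..."; only columns 0, 1
# and 3 are read here.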
def create_graph(data1, data2, width=11.0, height=8.0, filename='out.png', title=None):
fig, ax1 = plt.subplots(figsize=(width, height), dpi=300)
task_ticks = task_start_ticks(data1.dates, data1.names)
ax1.grid(linestyle='dashed', color='lightgray')
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%X'))
ax1.plot(data1.dates, data1.values, 'b-')
if title:
ax1.set_title(title)
ax1.set_xlabel('Time')
ax1.set_ylabel(data1.axis_name, color='b')
for item in ax1.get_xticklabels():
item.set_rotation(60)
ax2 = ax1.twiny()
ax2.set_xticks([x[0] for x in task_ticks])
ax2.set_xticklabels([x[1] for x in task_ticks])
ax2.grid(axis='x', linestyle='dashed', color='lightgray')
ax2.xaxis.set_ticks_position('bottom')
ax2.xaxis.set_label_position('bottom')
ax2.spines['bottom'].set_position(('outward', 86))
ax2.set_xlabel('Task')
ax2.set_xlim(ax1.get_xlim())
for item in ax2.get_xticklabels():
item.set_rotation(60)
ax3 = ax1.twinx()
ax3.plot(data2.dates, data2.values, 'g-')
ax3.set_ylabel(data2.axis_name, color='g')
fig.tight_layout()
fig.savefig(filename, format='png')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs=2, help='2 CSV files produced by cgroup_perf_recap to graph together')
parser.add_argument('--relative', default=False, action='store_true',
help='Use relative dates instead of absolute')
    parser.add_argument('--output', default='out.png', help='output path of PNG file: Default %(default)s')
parser.add_argument('--width', type=float, default=11.0,
help='Width of output image in inches. Default %(default)s')
parser.add_argument('--height', type=float, default=8.0,
help='Height of output image in inches. Default %(default)s')
parser.add_argument('--title', help='Title for graph')
return parser.parse_args()
def main():
args = parse_args()
data1 = create_axis_data(args.files[0], relative=args.relative)
data2 = create_axis_data(args.files[1], relative=args.relative)
create_graph(data1, data2, width=args.width, height=args.height, filename=args.output, title=args.title)
print('Graph written to %s' % os.path.abspath(args.output))
if __name__ == '__main__':
main()
| gpl-3.0 |
cgre-aachen/gempy | gempy/plot/_plot.py | 1 | 14507 | """
This file is part of gempy.
gempy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
gempy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with gempy. If not, see <http://www.gnu.org/licenses/>.
Module with classes and methods to perform implicit regional modelling based on
the potential field method.
Tested on Ubuntu 16
Created on 10/04/2018
@author: Elisa Heim, Miguel de la Varga
"""
# This is for sphenix to find the packages
# sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from typing import Set, Tuple, Dict, Union
import gempy as _gempy
from ._visualization_2d import PlotData2D, PlotSolution
from .visualization_3d import GemPyvtkInteract
def plot_data_3D(geo_data, ve=1, **kwargs):
"""
Plot in vtk all the input data of a model
Args:
geo_data (gempy.DataManagement.InputData): Input data of the model
Returns:
None
"""
vv = GemPyvtkInteract(geo_data, ve=ve, **kwargs)
# vv.restart()
vv.set_surface_points()
vv.set_orientations()
vv.render_model(**kwargs)
return vv
def plot_3D(geo_model, render_surfaces=True, render_data=True,
render_topography=True,
real_time=False, **kwargs):
"""
Plot in vtk all the input data of a model
Args:
geo_model (gempy.DataManagement.InputData): Input data of the model
Returns:
None
"""
vv = GemPyvtkInteract(geo_model, real_time=real_time, **kwargs)
# vv.restart()
if render_data is True:
vv.set_surface_points()
vv.set_orientations()
if render_surfaces is True:
vv.set_surfaces(geo_model._surfaces)
if render_topography is True and geo_model._grid.topography is not None:
vv.set_topography()
vv.render_model(**kwargs)
return vv
def export_to_vtk(geo_data, path=None, name=None, voxels=True, block=None, surfaces=True):
"""
    Export data to a VTK file for later visualization
Args:
geo_data(:class:`Model`)
path(str): path to the location of the vtk
name(str): Name of the files. Default name: Default
voxels(bool): if True export lith_block
        block(Optional[np.array]): One of the solutions of the regular grid. This can be used if, for
            example, you want to export a scalar field or a specific series block. If None is passed, lith_block
            will be exported.
surfaces(bool): If True, export the polydata surfaces.
Returns:
None
"""
if voxels is True:
GemPyvtkInteract.export_vtk_lith_block(geo_data, lith_block=block,
path=path)
if surfaces is True:
geo_data.solutions.compute_all_surfaces()
ver, sim = _gempy.get_surfaces(geo_data)
GemPyvtkInteract.export_vtk_surfaces(geo_data, ver, sim, path=path,
name=name)
return True
def plot_data(geo_data, direction="y", data_type='all', series="all",
show_legend=True, **kwargs):
"""
    Plot the projection of the raw data (surface_points and orientations) in 2D along a
    specific direction
Args:
        direction(str): xyz. Cartesian direction to be plotted
        series(str): series to plot
        ve(float): Vertical exaggeration
        **kwargs: seaborn lmplot keyword arguments. (TODO: add the link to them)
Returns:
None
"""
plot = PlotData2D(geo_data)
p = plot.plot_data(direction=direction, data_type=data_type, series=series,
show_legend=show_legend, **kwargs)
# TODO saving options
return plot
def plot_stereonet(geo_data, litho=None, planes=True, poles=True,
single_plots=False,
show_density=False):
'''
Plot an equal-area projection of the orientations dataframe using mplstereonet.
Args:
geo_model (gempy.DataManagement.InputData): Input data of the model
        series_only: To select whether a stereonet is plotted per series or per formation
litho: selection of formation or series names, as list. If None, all are plotted
planes: If True, azimuth and dip are plotted as great circles
poles: If True, pole points (plane normal vectors) of azimuth and dip are plotted
single_plots: If True, each formation is plotted in a single stereonet
show_density: If True, density contour plot around the pole points is shown
Returns:
None
'''
plot = PlotData2D(geo_data)
plot.plot_stereonet(litho=litho, planes=planes, poles=poles,
single_plots=single_plots,
show_density=show_density)
def plot_map(model, contour_lines=True, show_data=True, show_hillshades: bool = False, figsize=(12, 12), **kwargs):
"""
Args:
figsize:
show_hillshades:
model:
contour_lines:
show_faults:
show_data:
**kwargs
Returns:
"""
plot = PlotSolution(model)
plot.plot_map(contour_lines=contour_lines, show_data=show_data, show_hillshades=show_hillshades,
figsize=figsize, **kwargs)
def plot_section_traces(model, section_names=None, contour_lines=False,
show_data=True, show_all_data=False):
"""
Args:
model:
show_data:
section_names:
contour_lines:
Returns:
"""
plot = PlotSolution(model)
if plot.model.solutions.geological_map is not None:
plot.plot_map(contour_lines=contour_lines, show_data=show_data,
show_all_data=show_all_data)
# else:
# fig = plt.figure()
# plt.title('Section traces, z direction')
plot.plot_section_traces(show_data=show_data, section_names=section_names,
contour_lines=contour_lines,
show_all_data=show_all_data)
"""
def plot_predef_sections(model, show_traces=True, show_data=False, section_names=None, show_faults=True,
show_topo=True, figsize=(12, 12)):
Args:
model:
show_traces:
show_data:
section_names:
show_faults:
show_topo:
figsize:
Returns:
plot = PlotSolution(model)
plot.plot_sections(show_traces=show_traces, show_data=show_data, section_names=section_names,
show_faults=show_faults, show_topo=show_topo, figsize=figsize)
"""
def plot_section_by_name(model, section_name, show_faults=True, show_topo=True,
show_data=True,
show_all_data=False, radius='default',
contourplot=True):
# Todo needs more keywords:
### if show_data: radius, data_type
plot = PlotSolution(model)
plot.plot_section_by_name(section_name=section_name, show_topo=show_topo,
show_faults=show_faults,
show_data=show_data, show_all_data=show_all_data,
radius=radius, contourplot=contourplot)
def plot_all_sections(model, show_data=False, section_names=None,
show_topo=True, figsize=(12, 12)):
plot = PlotSolution(model)
plot.plot_all_sections(show_data=show_data, section_names=section_names,
show_topo=show_topo,
figsize=figsize)
def plot_section(model, cell_number=13, block=None, direction="y",
interpolation='none',
show_data=True, show_faults=True, show_topo=False,
block_type=None, ve=1,
show_all_data=False, show_legend=True, **kwargs):
"""
Plot a section of the block model
Args:
cell_number(int): position of the array to plot
        direction(str): xyz. Cartesian direction to be plotted
interpolation(str): Type of interpolation of plt.imshow. Default 'none'. Acceptable values are 'none'
,'nearest', 'bilinear', 'bicubic',
'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser',
'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc',
'lanczos'
        ve(float): Vertical exaggeration
        **kwargs: imshow kwargs
Returns:
None
"""
plot = PlotSolution(model)
plot.fig = plot.plot_block_section(model.solutions, cell_number, block,
direction, interpolation,
show_data, show_faults, show_topo,
block_type, ve,
show_all_data=show_all_data,
show_legend=show_legend, **kwargs)
return plot
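# Minimal usage sketch (illustrative only; `geo_model` stands for a model that
# has already been built and computed elsewhere with the gempy API):
#
#   plot_section(geo_model, cell_number=25, direction='y', show_data=True)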
def plot_scalar_field(model, cell_number, N=20,
direction="y", block=None, alpha=0.6, show_data=True,
show_all_data=False, series=0, *args, **kwargs):
"""
Plot a potential field in a given direction.
Args:
cell_number(int): position of the array to plot
potential_field(str): name of the potential field (or series) to plot
n_pf(int): number of the potential field (or series) to plot
        direction(str): xyz. Cartesian direction to be plotted
serie: *Deprecated*
**kwargs: plt.contour kwargs
Returns:
None
"""
plot = PlotSolution(model)
if block is not None:
block = block
else:
block = model.solutions
plot.plot_scalar_field(block, cell_number, N=N,
direction=direction, show_data=show_data,
series=series, alpha=alpha,
show_all_data=show_all_data,
*args, **kwargs)
def plot_section_scalarfield(model, section_name, sn, levels=50,
show_faults=True, show_topo=True, lithback=True):
"""
Plot the potential field in the predefined sections.
Args:
model:
section_name: name of the section
sn: scalar field number, order like in model.series
levels: number of isolines you want to plot
show_faults: whether or not faults should be plotted
show_topo: whether or not the topography should be plotted
lithback: lithology background
Returns:
None
"""
plot = PlotSolution(model)
plot.plot_section_scalarfield(section_name=section_name, sn=sn,
levels=levels, show_faults=show_faults,
show_topo=show_topo, lithback=lithback)
def plot_gradient(geo_data, scalar_field, gx, gy, gz, cell_number, q_stepsize=5,
direction="y", plot_scalar=True, **kwargs):
"""
Plot the gradient of the scalar field in a given direction.
Args:
geo_data (gempy.DataManagement.InputData): Input data of the model
scalar_field(numpy.array): scalar field to plot with the gradient
gx(numpy.array): gradient in x-direction
gy(numpy.array): gradient in y-direction
gz(numpy.array): gradient in z-direction
cell_number(int): position of the array to plot
q_stepsize(int): step size between arrows to indicate gradient
        direction(str): xyz. Cartesian direction to be plotted
plot_scalar(bool): boolean to plot scalar field
**kwargs: plt.contour kwargs
Returns:
None
"""
plot = PlotSolution(geo_data)
plot.plot_gradient(scalar_field, gx, gy, gz, cell_number,
q_stepsize=q_stepsize,
direction=direction, plot_scalar=plot_scalar,
**kwargs)
def plot_topology(
geo_model,
edges: Set[Tuple[int, int]],
centroids: Dict,
direction: Union["x", "y", "z"] = "y",
scale: bool = True,
label_kwargs: dict = None,
edge_kwargs: dict = None
):
"""Plot the topology adjacency graph in 2-D.
Args:
geo_model ([type]): GemPy geomodel instance.
edges (Set[Tuple[int, int]]): Set of topology edges.
centroids (Dict[int, Array[int, 3]]): Dictionary of topology id's and
their centroids.
direction (Union["x", "y", "z", optional): Section direction.
Defaults to "y".
label_kwargs (dict, optional): Keyword arguments for topology labels.
Defaults to None.
edge_kwargs (dict, optional): Keyword arguments for topology edges.
Defaults to None.
"""
PlotSolution.plot_topo_g(
geo_model,
edges,
centroids,
direction=direction,
scale=scale,
label_kwargs=label_kwargs,
edge_kwargs=edge_kwargs
)
def plot_ar(geo_model, path=None, project_name=None, api_token=None, secret=None):
""" Create, upload and retrieve tag to visualize the model in AR in rexview
https://www.rexos.org/getting-started/
Args:
geo_model (gempy.Model):
path: Location for rex files. Default cwd
project_name: Name of the project in rexos
api_token: rexos api token
secret: rexos secret
Returns:
gempy.addons.rex_api.Rextag
"""
from gempy.addons.rex_api import upload_to_rexcloud
from gempy.addons.gempy_to_rexfile import write_rex, geomodel_to_rex
if project_name is None:
project_name = geo_model.meta.project_name
if path is None:
path = './'
rex_bytes = geomodel_to_rex(geo_model)
files_path = write_rex(rex_bytes, path)
project_name_ = project_name
for i in range(40):
try:
tag = upload_to_rexcloud(files_path, project_name=project_name_, api_token=api_token, secret=secret)
break
except ConnectionError:
project_name_ = project_name + str(i)
pass
return tag
| lgpl-3.0 |
espenhgn/nest-simulator | pynest/examples/twoneurons.py | 3 | 1260 | # -*- coding: utf-8 -*-
#
# twoneurons.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Two neuron example
----------------------------
See Also
~~~~~~~~~~
:doc:`one_neuron`
"""
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
weight = 20.0
delay = 1.0
stim = 1000.0
neuron1 = nest.Create("iaf_psc_alpha")
neuron2 = nest.Create("iaf_psc_alpha")
voltmeter = nest.Create("voltmeter")
neuron1.I_e = stim
nest.Connect(neuron1, neuron2, syn_spec={'weight': weight, 'delay': delay})
nest.Connect(voltmeter, neuron2)
nest.Simulate(100.0)
nest.voltage_trace.from_device(voltmeter)
plt.show()
| gpl-2.0 |
mickele77/FreeCAD | src/Mod/Plot/InitGui.py | 18 | 2920 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
class PlotWorkbench(Workbench):
"""Workbench of Plot module."""
from plotUtils import Paths
import PlotGui
Icon = 'Icon.svg'
MenuText = "Plot"
ToolTip = ("The Plot module is used to edit/save output plots performed "
"by other tools")
def Initialize(self):
from PySide import QtCore, QtGui
cmdlst = ["Plot_SaveFig",
"Plot_Axes",
"Plot_Series",
"Plot_Grid",
"Plot_Legend",
"Plot_Labels",
"Plot_Positions"]
self.appendToolbar(str(QtCore.QT_TRANSLATE_NOOP(
"Plot",
"Plot edition tools")), cmdlst)
self.appendMenu(str(QtCore.QT_TRANSLATE_NOOP(
"Plot",
"Plot")), cmdlst)
try:
import matplotlib
except ImportError:
from PySide import QtCore, QtGui
msg = QtGui.QApplication.translate(
"plot_console",
"matplotlib not found, Plot module will be disabled",
None,
QtGui.QApplication.UnicodeUTF8)
FreeCAD.Console.PrintMessage(msg + '\n')
Gui.addWorkbench(PlotWorkbench())
| lgpl-2.1 |
bigdataelephants/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 106 | 2313 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof of concept application of Non Negative Matrix
Factorization of the term frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware than the time complexity
is polynomial.
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data[:n_samples])
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
feature_names = vectorizer.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
| bsd-3-clause |
migueldvb/george | document/code/exo_demo_1/results.py | 4 | 1288 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["results"]
import os
import triangle
import numpy as np
import cPickle as pickle
import matplotlib.pyplot as pl
def results(fn):
model, sampler = pickle.load(open(fn, "rb"))
mu = np.median(model.f)
ppm = lambda f: (f / mu - 1) * 1e6
# Plot the data.
fig = pl.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
ax.plot(model.t, ppm(model.f), ".k")
ax.set_xlim(np.min(model.t), np.max(model.t))
ax.set_xlabel("time since transit [days]")
ax.set_ylabel("relative flux [ppm]")
fig.subplots_adjust(left=0.2, bottom=0.2, top=0.9, right=0.9)
# Plot the predictions.
samples = sampler.flatchain
t = np.linspace(model.t.min(), model.t.max(), 1000)
for i in np.random.randint(len(samples), size=10):
model.vector = samples[i]
ax.plot(t, ppm(model.predict(t)), color="#4682b4", alpha=0.5)
fig.savefig(os.path.splitext(fn)[0] + "-results.pdf")
# Plot the corner plot.
fig = triangle.corner(samples, labels=model.labels,
truths=model.true_vector)
fig.savefig(os.path.splitext(fn)[0] + "-triangle.png")
if __name__ == "__main__":
import sys
results(sys.argv[1])
| mit |
ndingwall/scikit-learn | examples/multioutput/plot_classifier_chain_yeast.py | 23 | 4637 | """
============================
Classifier Chain
============================
Example of using classifier chain on a multilabel dataset.
For this example we will use the `yeast
<https://www.openml.org/d/40597>`_ dataset which contains
2417 datapoints each with 103 features and 14 possible labels. Each
data point has at least one label. As a baseline we first train a logistic
regression classifier for each of the 14 labels. To evaluate the performance of
these classifiers we predict on a held-out test set and calculate the
:ref:`jaccard score <jaccard_similarity_score>` for each sample.
Next we create 10 classifier chains. Each classifier chain contains a
logistic regression model for each of the 14 labels. The models in each
chain are ordered randomly. In addition to the 103 features in the dataset,
each model gets the predictions of the preceding models in the chain as
features (note that by default at training time each model gets the true
labels as features). These additional features allow each chain to exploit
correlations among the classes. The Jaccard similarity score for each chain
tends to be greater than that of the set of independent logistic models.
Because the models in each chain are arranged randomly there is significant
variation in performance among the chains. Presumably there is an optimal
ordering of the classes in a chain that will yield the best performance.
However we do not know that ordering a priori. Instead we can construct a
voting ensemble of classifier chains by averaging the binary predictions of
the chains and apply a threshold of 0.5. The Jaccard similarity score of the
ensemble is greater than that of the independent models and tends to exceed
the score of each chain in the ensemble (although this is not guaranteed
with randomly ordered chains).
"""
# Author: Adam Kleczewski
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.multioutput import ClassifierChain
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import jaccard_score
from sklearn.linear_model import LogisticRegression
print(__doc__)
# Load a multi-label dataset from https://www.openml.org/d/40597
X, Y = fetch_openml('yeast', version=4, return_X_y=True)
Y = Y == 'TRUE'
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.2,
random_state=0)
# Fit an independent logistic regression model for each class using the
# OneVsRestClassifier wrapper.
base_lr = LogisticRegression()
ovr = OneVsRestClassifier(base_lr)
ovr.fit(X_train, Y_train)
Y_pred_ovr = ovr.predict(X_test)
ovr_jaccard_score = jaccard_score(Y_test, Y_pred_ovr, average='samples')
# Fit an ensemble of logistic regression classifier chains and take the
# average prediction of all the chains.
chains = [ClassifierChain(base_lr, order='random', random_state=i)
for i in range(10)]
for chain in chains:
chain.fit(X_train, Y_train)
Y_pred_chains = np.array([chain.predict(X_test) for chain in
chains])
chain_jaccard_scores = [jaccard_score(Y_test, Y_pred_chain >= .5,
average='samples')
for Y_pred_chain in Y_pred_chains]
Y_pred_ensemble = Y_pred_chains.mean(axis=0)
ensemble_jaccard_score = jaccard_score(Y_test,
Y_pred_ensemble >= .5,
average='samples')
model_scores = [ovr_jaccard_score] + chain_jaccard_scores
model_scores.append(ensemble_jaccard_score)
model_names = ('Independent',
'Chain 1',
'Chain 2',
'Chain 3',
'Chain 4',
'Chain 5',
'Chain 6',
'Chain 7',
'Chain 8',
'Chain 9',
'Chain 10',
'Ensemble')
x_pos = np.arange(len(model_names))
# Plot the Jaccard similarity scores for the independent model, each of the
# chains, and the ensemble (note that the vertical axis on this plot does
# not begin at 0).
fig, ax = plt.subplots(figsize=(7, 4))
ax.grid(True)
ax.set_title('Classifier Chain Ensemble Performance Comparison')
ax.set_xticks(x_pos)
ax.set_xticklabels(model_names, rotation='vertical')
ax.set_ylabel('Jaccard Similarity Score')
ax.set_ylim([min(model_scores) * .9, max(model_scores) * 1.1])
colors = ['r'] + ['b'] * len(chain_jaccard_scores) + ['g']
ax.bar(x_pos, model_scores, alpha=0.5, color=colors)
plt.tight_layout()
plt.show()
| bsd-3-clause |
larsmans/scikit-learn | examples/applications/plot_prediction_latency.py | 25 | 11317 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : benchmark configuration dict (provides the estimator names)
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
xtick_names = plt.setp(ax1, xticklabels=cls_infos)
plt.setp(xtick_names)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
mellorjc/partition_kernel_gp | part_kernel.py | 1 | 8507 | import inspect
from numpy import ones, zeros, where, argmin, unique, array
from numpy import logical_and, logical_or, arange, sqrt
from numpy import maximum, pi, log
from numpy.random import choice
from numpy.linalg import norm, det
from scipy.stats import binom, uniform
from sklearn.metrics.pairwise import pairwise_distances
from scipy.spatial.distance import euclidean
from scipy.sparse.linalg import LinearOperator, cg
from random import randint
from numpy.random import randint as nrandint
from multiprocessing import Pool, Array
import ctypes
from numpy.ctypeslib import as_array
from sklearn.neighbors import KNeighborsClassifier as KClass
import pickle
import sys
_sharedX = None
_sharedX2 = None
def para_func(arg):
num, shape, metric, cnum = arg
X = _sharedX
centers = choice(X.shape[0], cnum, False)
mod = KClass(1, metric=metric)
mod.fit(X[centers, :], range(centers.size))
dist, m = mod.kneighbors(X, return_distance=True)
return m
def para_func2(arg):
num, shape, shape2, metric, cnum = arg
X = _sharedX
X2 = _sharedX2
centers = choice(X.shape[0], cnum, False)
mod = KClass(1, metric=metric)
mod.fit(X[centers, :], range(centers.size))
dista1, ma1 = mod.kneighbors(X, return_distance=True)
distb1, mb1 = mod.kneighbors(X2, return_distance=True)
mall = ma1
mall2 = mb1
return mall2, mall
def initShared(X):
global _sharedX
_sharedX = X
def initShared2(X, X2):
global _sharedX
global _sharedX2
_sharedX = X
_sharedX2 = X2
def load_model(model_folder):
model = FastKernel(None, None, None)
with open(model_folder + "/model.cfg", 'r') as f:
        model.X = pickle.load(f)  # `load` was undefined; pickle.load(f) is the evident intent
return model
class FastKernel:
def __init__(self, X, y, m=200, h=8, distance='euclidean', sigma=0.01, eps=0.05, num_proc=8):
self.cnum = 3*X.shape[0]//4
self.d = distance
self.X = X
self.y = y
self.num_proc = num_proc
self.v = None
self.m = m
self.h = h
self.sigma = sigma
self.eps = eps
self.cs = None
self.selected = False
# the number of centers for each m
if len(X.shape) == 1:
yt = 1
else:
x, yt = X.shape
if yt is None:
yt = 1
def _select_centers(self, X):
if self.selected:
return
if len(X.shape) == 1:
X = X.reshape((X.shape[0], 1))
self.selected = True
def K(self, X):
# the cluster class assigned to each example use
self._select_centers(X)
if len(X.shape) == 1:
X = X.reshape((X.shape[0], 1))
c = zeros((X.shape[0], self.m))
share_base = Array(ctypes.c_double, X.shape[0]*X.shape[1], lock=False)
share = as_array(share_base)
share = share.reshape(X.shape)
share[:, :] = X
if self.cs is None:
pool = Pool(self.num_proc, maxtasksperchild=50, initializer=initShared, initargs=[share])
cs = pool.imap(para_func, ((i, X.shape, self.d, self.cnum) for i in xrange(self.m)), 10)
self.cs = list(cs)
pool.close()
pool.join()
for i, cv in enumerate(self.cs):
c[:, i] = cv.flatten()
return c
def K2y(self, X, X2, y):
res = zeros(X.shape[0])
if len(X.shape) == 1:
X = X.reshape((X.shape[0], 1))
if len(X2.shape) == 1:
X2 = X2.reshape((X2.shape[0], 1))
share_base = Array(ctypes.c_double, X.shape[0]*X.shape[1], lock=False)
share = as_array(share_base)
share = share.reshape(X.shape)
share[:, :] = X
share2_base = Array(ctypes.c_double, X2.shape[0]*X2.shape[1], lock=False)
share2 = as_array(share2_base)
share2 = share2.reshape(X2.shape)
share2[:, :] = X2
pool = Pool(self.num_proc, maxtasksperchild=50, initializer=initShared2, initargs=[share2, share])
cs = pool.imap(para_func2, ((i, X2.shape, X.shape, self.d, self.cnum) for i in xrange(self.m)), 10)
for c, c2 in cs:
for cls in unique(c):
if cls > -1:
res[c.flatten() == cls] += y[c2.flatten() == cls].sum()
res /= self.m
pool.close()
pool.join()
return res
def K2(self, X, X2):
#if X.ndim == 0:
# X = X.reshape((1, 1))
#if X2.ndim == 0:
# X2 = X2.reshape((1, 1))
if X.ndim == 1:
X = X.reshape((X.shape[0], 1))
if X2.ndim == 1:
X2 = X2.reshape((X2.shape[0], 1))
if X.ndim == 0:
Xsh = 1
Xsh2 = 1
else:
Xsh = X.shape[0]
Xsh2 = X.shape[1]
if X2.ndim == 0:
X2sh = 1
X2sh2 = 1
else:
X2sh = X2.shape[0]
X2sh2 = X2.shape[1]
res = zeros((Xsh, X2sh))
share_base = Array(ctypes.c_double, Xsh*Xsh2, lock=False)
share = as_array(share_base)
share = share.reshape((Xsh, Xsh2))
share[:, :] = X
share2_base = Array(ctypes.c_double, X2sh*X2sh2, lock=False)
share2 = as_array(share2_base)
share2 = share2.reshape(X2.shape)
share2[:, :] = X2
pool = Pool(self.num_proc, maxtasksperchild=50, initializer=initShared2, initargs=[share2, share])
cs = pool.imap(para_func2, ((i, X2.shape, X.shape, self.d, self.cnum) for i in xrange(self.m)), 10)
for c, c2 in cs:
for i, c_v in enumerate(c):
for j, c_v2 in enumerate(c2):
if c_v == c_v2 and c_v != -1:
res[i, j] += 1.
res /= self.m
pool.close()
pool.join()
if X.ndim == 0:
res = res.flatten()
return res
def Ky(self, X, y):
if len(X.shape) == 1:
X = X.reshape((X.shape[0], 1))
res = zeros(X.shape[0])
c = self.K(X)
a = 1.0
#a = 0.95
for i in range(self.m):
for j in unique(c[:, i]):
if j < 0:
continue
ind = where(c[:, i] == j)[0]
for k in ind:
res[k] += (1.-a)*y[k] + a*y[ind].sum()
if (c[:, i] == -1).any():
res[c[:, i] == -1] += y[c[:, i] == -1] # JOE remove if not doing semi
res /= float(self.m)
return res
def B(self, X, y):
if len(X.shape) == 1:
X = X.reshape((X.shape[0], 1))
res = zeros(X.shape[0])
c = self.K(X)
for i in range(self.m):
for j in unique(c[:, i]):
ind = c[:, i] == j
if j < 0:
res[ind] += (1./(1. + self.sigma))*y[ind]
continue
res[ind] += (1./(float(where(ind)[0].size) + self.sigma))*y[ind].sum()
res /= self.m
res = (1./self.sigma)*y - res
return res
def train(self, X, y):
if self.v is None:
A = LinearOperator((X.shape[0], X.shape[0]), lambda x: self.Ky(X, x) + self.sigma*x)
M = LinearOperator((X.shape[0], X.shape[0]), lambda x: self.B(X, x))
self.v, info = cg(A, y, M=M, maxiter=40, tol=self.eps, callback=resid_callback)
def predict_mean(self, X2, X, y):
self.train(X, y)
self.cs = None
res = self.K2y(X2, X, self.v)
return res
def predict_var(self, X2, X, y):
vs = zeros(X2.shape[0])
for i in range(X2.shape[0]):
self.cs = None
# v = self.K2(X2[i, :], X2[i, :])
v = 1. # by definition of partition kernel K(x, x) = 1
A = LinearOperator((X.shape[0], X.shape[0]), lambda x: self.Ky(X, x) + self.sigma*x)
M = LinearOperator((X.shape[0], X.shape[0]), lambda x: self.B(X, x))
self.cs = None
if X2.ndim == 1:
k_star = self.K2(X2[i], X)
else:
k_star = self.K2(X2[i, :], X)
tmp, info = cg(A, k_star.T, M=M, maxiter=40, tol=self.eps)
vs[i] = v - k_star.dot(tmp)
return vs
def likelihood(self, X, y):
self.train(X, y)
A = self.K2(X, X)
res = -.5*y.dot(self.v)-y.shape[0]*log(2.*pi)-.5*log(det(A))
return res
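# Minimal usage sketch (illustrative only, not part of the original module;
# X_train, X_test and y_train stand for ordinary numpy arrays):
#
#   fk = FastKernel(X_train, y_train, m=50, num_proc=4)
#   mean = fk.predict_mean(X_test, X_train, y_train)  # posterior mean
#   var = fk.predict_var(X_test, X_train, y_train)    # posterior variance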
def resid_callback(xk):
res = inspect.currentframe().f_back.f_locals['resid']
with open('residuals.dat', 'a') as f:
f.write('%s\n' % res)
| mit |
jayfans3/pu-learning | src/tests/breastCancer.py | 2 | 4863 | """
Created on Dec 22, 2012
@author: Alexandre
The goal of this test is to verify that the PUAdapter really allows a regular estimator to
achieve better accuracy in the case where the \"negative\" examples are contaminated with a
number of positive examples.
Here we use the breast cancer dataset from UCI. We purposely take a few malignant examples and
assign them the benign label and consider the benign examples as being \"unlabeled\". We then compare
the performance of the estimator while using the PUAdapter and without using the PUAdapter. To
assess the performance, we use the F1 score, precision and recall.
Results show that PUAdapter greatly increases the performance of an estimator in the case where
the negative examples are contaminated with positive examples. We call this situation positive and
unlabeled learning.
"""
import numpy as np
import matplotlib.pyplot as plt
from puLearning.puAdapter import PUAdapter
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_recall_fscore_support
def load_breast_cancer(path):
f = open(path)
lines = f.readlines()
f.close()
examples = []
labels = []
for l in lines:
spt = l.split(',')
label = float(spt[-1])
feat = spt[:-1]
if '?' not in spt:
examples.append(feat)
labels.append(label)
return np.array(examples), np.array(labels)
if __name__ == '__main__':
np.random.seed(42)
print "Loading dataset"
print
X,y = load_breast_cancer('../datasets/breast-cancer-wisconsin.data')
#Shuffle dataset
print "Shuffling dataset"
print
permut = np.random.permutation(len(y))
X = X[permut]
y = y[permut]
#make the labels -1.,+1. I don't like 2 and 4 (:
y[np.where(y == 2)[0]] = -1.
y[np.where(y == 4)[0]] = +1.
print "Loaded ", len(y), " examples"
    print len(np.where(y == -1.)[0])," are benign"
print len(np.where(y == +1.)[0])," are malignant"
print
#Split test/train
print "Splitting dataset in test/train sets"
print
split = 2*len(y)/3
X_train = X[:split]
y_train = y[:split]
X_test = X[split:]
y_test = y[split:]
print "Training set contains ", len(y_train), " examples"
    print len(np.where(y_train == -1.)[0])," are benign"
print len(np.where(y_train == +1.)[0])," are malignant"
print
pu_f1_scores = []
reg_f1_scores = []
n_sacrifice_iter = range(0, len(np.where(y_train == +1.)[0])-21, 5)
for n_sacrifice in n_sacrifice_iter:
#send some positives to the negative class! :)
print "PU transformation in progress."
        print "Making ", n_sacrifice, " malignant examples benign."
print
y_train_pu = np.copy(y_train)
pos = np.where(y_train == +1.)[0]
np.random.shuffle(pos)
sacrifice = pos[:n_sacrifice]
y_train_pu[sacrifice] = -1.
print "PU transformation applied. We now have:"
        print len(np.where(y_train_pu == -1.)[0])," are benign"
print len(np.where(y_train_pu == +1.)[0])," are malignant"
print
#Get f1 score with pu_learning
print "PU learning in progress..."
estimator = RandomForestClassifier(n_estimators=100,
criterion='gini',
bootstrap=True,
n_jobs=1)
pu_estimator = PUAdapter(estimator)
pu_estimator.fit(X_train,y_train_pu)
y_pred = pu_estimator.predict(X_test)
precision, recall, f1_score, _ = precision_recall_fscore_support(y_test, y_pred)
pu_f1_scores.append(f1_score[1])
print "F1 score: ", f1_score[1]
print "Precision: ", precision[1]
print "Recall: ", recall[1]
print
#Get f1 score without pu_learning
print "Regular learning in progress..."
estimator = RandomForestClassifier(n_estimators=100,
bootstrap=True,
n_jobs=1)
estimator.fit(X_train,y_train_pu)
y_pred = estimator.predict(X_test)
precision, recall, f1_score, _ = precision_recall_fscore_support(y_test, y_pred)
reg_f1_scores.append(f1_score[1])
print "F1 score: ", f1_score[1]
print "Precision: ", precision[1]
print "Recall: ", recall[1]
print
print
plt.title("Random forest with/without PU learning")
plt.plot(n_sacrifice_iter, pu_f1_scores, label='PU Adapted Random Forest')
plt.plot(n_sacrifice_iter, reg_f1_scores, label='Random Forest')
    plt.xlabel('Number of positive examples hidden in the unlabeled set')
plt.ylabel('F1 Score')
plt.legend()
plt.show()
| bsd-3-clause |
morrisonwudi/zipline | zipline/utils/data.py | 31 | 12761 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import numpy as np
import pandas as pd
from copy import deepcopy
def _ensure_index(x):
if not isinstance(x, pd.Index):
x = pd.Index(sorted(x))
return x
class RollingPanel(object):
"""
Preallocation strategies for rolling window over expanding data set
Restrictions: major_axis can only be a DatetimeIndex for now
"""
def __init__(self,
window,
items,
sids,
cap_multiple=2,
dtype=np.float64,
initial_dates=None):
self._pos = window
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.dtype = dtype
if initial_dates is None:
self.date_buf = np.empty(self.cap, dtype='M8[ns]') * pd.NaT
elif len(initial_dates) != window:
raise ValueError('initial_dates must be of length window')
else:
self.date_buf = np.hstack(
(
initial_dates,
np.empty(
window * (cap_multiple - 1),
dtype='datetime64[ns]',
),
),
)
self.buffer = self._create_buffer()
@property
def cap(self):
return self.cap_multiple * self._window
@property
def _start_index(self):
return self._pos - self._window
@property
def start_date(self):
return self.date_buf[self._start_index]
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._start_index, :]
return self.buffer.iloc[:, self._start_index, :]
def set_minor_axis(self, minor_axis):
self.minor_axis = _ensure_index(minor_axis)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def set_items(self, items):
self.items = _ensure_index(items)
self.buffer = self.buffer.reindex(items=self.items)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def extend_back(self, missing_dts):
"""
Resizes the buffer to hold a new window with a new cap_multiple.
If cap_multiple is None, then the old cap_multiple is used.
"""
delta = len(missing_dts)
if not delta:
raise ValueError(
'missing_dts must be a non-empty index',
)
self._window += delta
self._pos += delta
self.date_buf = self.date_buf.copy()
self.date_buf.resize(self.cap)
self.date_buf = np.roll(self.date_buf, delta)
old_vals = self.buffer.values
shape = old_vals.shape
nan_arr = np.empty((shape[0], delta, shape[2]))
nan_arr.fill(np.nan)
new_vals = np.column_stack(
(nan_arr,
old_vals,
np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))),
)
self.buffer = pd.Panel(
data=new_vals,
items=self.items,
minor_axis=self.minor_axis,
major_axis=np.arange(self.cap),
dtype=self.dtype,
)
# Fill the delta with the dates we calculated.
where = slice(self._start_index, self._start_index + delta)
self.date_buf[where] = missing_dts
    def add_frame(self, tick, frame, minor_axis=None, items=None):
        """
        Add a frame of values for the given tick to the buffer, rolling the
        window forward when the preallocated buffer is full.
        """
if self._pos == self.cap:
self._roll_data()
values = frame
if isinstance(frame, pd.DataFrame):
values = frame.values
self.buffer.values[:, self._pos, :] = values.astype(self.dtype)
self.date_buf[self._pos] = tick
self._pos += 1
def get_current(self, item=None, raw=False, start=None, end=None):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
item_indexer = slice(None)
if item:
item_indexer = self.items.get_loc(item)
start_index = self._start_index
end_index = self._pos
        # get initial date window
where = slice(start_index, end_index)
current_dates = self.date_buf[where]
def convert_datelike_to_long(dt):
if isinstance(dt, pd.Timestamp):
return dt.asm8
if isinstance(dt, datetime.datetime):
return np.datetime64(dt)
return dt
# constrict further by date
if start:
start = convert_datelike_to_long(start)
start_index += current_dates.searchsorted(start)
if end:
end = convert_datelike_to_long(end)
_end = current_dates.searchsorted(end, 'right')
end_index -= len(current_dates) - _end
where = slice(start_index, end_index)
values = self.buffer.values[item_indexer, where, :]
current_dates = self.date_buf[where]
if raw:
# return copy so we can change it without side effects here
return values.copy()
major_axis = pd.DatetimeIndex(deepcopy(current_dates), tz='utc')
if values.ndim == 3:
return pd.Panel(values, self.items, major_axis, self.minor_axis,
dtype=self.dtype)
elif values.ndim == 2:
return pd.DataFrame(values, major_axis, self.minor_axis,
dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._start_index, self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._start_index, self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
@property
def window_length(self):
return self._window
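# Minimal usage sketch for RollingPanel (illustrative only, not part of the
# zipline API surface; the item/sid values are arbitrary):
#
#   rp = RollingPanel(window=2, items=['price', 'volume'], sids=[1, 2])
#   for tick in pd.date_range('2014-01-01', periods=3, tz='utc'):
#       frame = pd.DataFrame(np.ones((2, 2)), index=rp.items,
#                            columns=rp.minor_axis)
#       rp.add_frame(tick, frame)
#   current = rp.get_current()  # pd.Panel holding the last `window` ticks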
class MutableIndexRollingPanel(object):
"""
A version of RollingPanel that exists for backwards compatibility with
batch_transform. This is a copy to allow behavior of RollingPanel to drift
away from this without breaking this class.
This code should be considered frozen, and should not be used in the
future. Instead, see RollingPanel.
"""
def __init__(self, window, items, sids, cap_multiple=2, dtype=np.float64):
self._pos = 0
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.cap = cap_multiple * window
self.dtype = dtype
self.date_buf = np.empty(self.cap, dtype='M8[ns]')
self.buffer = self._create_buffer()
def _oldest_frame_idx(self):
return max(self._pos - self._window, 0)
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._oldest_frame_idx(), :]
return self.buffer.iloc[:, self._oldest_frame_idx(), :]
def set_sids(self, sids):
self.minor_axis = _ensure_index(sids)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def get_current(self):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
where = slice(self._oldest_frame_idx(), self._pos)
major_axis = pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
return pd.Panel(self.buffer.values[:, where, :], self.items,
major_axis, self.minor_axis, dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._oldest_frame_idx(), self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._oldest_frame_idx(), self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
    def add_frame(self, tick, frame, minor_axis=None, items=None):
        """
        Add a frame of values for the given tick, expanding the minor axis
        and items when the frame introduces new columns or fields.
        """
if self._pos == self.cap:
self._roll_data()
if isinstance(frame, pd.DataFrame):
minor_axis = frame.columns
items = frame.index
if set(minor_axis).difference(set(self.minor_axis)) or \
set(items).difference(set(self.items)):
self._update_buffer(frame)
vals = frame.T.astype(self.dtype)
self.buffer.loc[:, self._pos, :] = vals
self.date_buf[self._pos] = tick
self._pos += 1
def _update_buffer(self, frame):
# Get current frame as we only need to care about the data that is in
# the active window
old_buffer = self.get_current()
if self._pos >= self._window:
# Don't count the last major_axis entry if we're past our window,
# since it's about to roll off the end of the panel.
old_buffer = old_buffer.iloc[:, 1:, :]
nans = pd.isnull(old_buffer)
# Find minor_axes that have only nans
# Note that minor is axis 2
non_nan_cols = set(old_buffer.minor_axis[~np.all(nans, axis=(0, 1))])
# Determine new columns to be added
new_cols = set(frame.columns).difference(non_nan_cols)
# Update internal minor axis
self.minor_axis = _ensure_index(new_cols.union(non_nan_cols))
# Same for items (fields)
# Find items axes that have only nans
# Note that items is axis 0
non_nan_items = set(old_buffer.items[~np.all(nans, axis=(1, 2))])
new_items = set(frame.index).difference(non_nan_items)
self.items = _ensure_index(new_items.union(non_nan_items))
# :NOTE:
# There is a simpler and 10x faster way to do this:
#
# Reindex buffer to update axes (automatically adds nans)
# self.buffer = self.buffer.reindex(items=self.items,
# major_axis=np.arange(self.cap),
# minor_axis=self.minor_axis)
#
# However, pandas==0.12.0, for which we remain backwards compatible,
# has a bug in .reindex() that this triggers. Using .update() as before
# seems to work fine.
new_buffer = self._create_buffer()
new_buffer.update(
self.buffer.loc[non_nan_items, :, non_nan_cols])
self.buffer = new_buffer
| apache-2.0 |
chenyyx/scikit-learn-doc-zh | examples/zh/cluster/plot_color_quantization.py | 61 | 3444 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| gpl-3.0 |
moserand/crosswater | crosswater/routing_model/convert_hdf_tributaries.py | 1 | 3005 | """Convert the HDF5 from one group with one table per timestep into a table
with one group with one table per catchment.
This converter is for the aggregation per tributary.
"""
import numpy as np
import tables
import fnmatch
from crosswater.read_config import read_config
from crosswater.tools.time_helper import ProgressDisplay
class Convert(object):
"""Convert table per timestep to table per catchment
"""
def __init__(self, config_file):
config = read_config(config_file)
self.input_file_name = config['routing_model']['input_steps_path']
self.output_file_name = config['routing_model']['output_catchments_path'] ### missing in current config file
def _open_files(self):
"""Open HDF5 input and output files.
"""
self.hdf_input = tables.open_file(self.input_file_name, mode = 'r')
self.hdf_output = tables.open_file(self.output_file_name, mode = 'w',
title='Crosswater aggregated results per catchment')
def _close_files(self):
"""Close HDF5 input and output files.
"""
self.hdf_input.close()
self.hdf_output.close()
def count_steps(self, input_file):
"""Count timesteps
"""
node = self.hdf_input.get_node('/')
node_names = [i._v_name for i in node._f_list_nodes()]
steps = fnmatch.filter(node_names,'step_*')
return len(steps)
def get_ids(self, input_file):
"""Get catchment ids
"""
node_0 = self.hdf_input.get_node('/', 'step_0/values')
ids = node_0.col('catchment_outlet')
return ids
def _get_values(self,id_):
"""Get values for one catchment and all timesteps
"""
values = np.empty(shape=(self.steps,1), dtype=[('discharge', '<f8'), ('load_aggregated', '<f8')])
for step in range(self.steps):
in_table = self.hdf_input.get_node('/', 'step_{}/values'.format(step))
row = in_table.read_where('catchment_outlet==id_')[['discharge', 'load_aggregated']]
values[step] = row
in_table.flush()
return values
def convert(self):
"""Write values to output table
"""
filters = tables.Filters(complevel=5, complib='zlib')
for id_ in self.ids:
#out_table = pandas.DataFrame(index=range(self.steps), columns=['discharge', 'load'])
values = self._get_values(id_)
group = self.hdf_output.create_group('/', 'catch_{}'.format(int(id_)))
self.hdf_output.create_table(group, 'values', values, filters=filters)
def run(self):
"""Run thread.
"""
self._open_files()
self.steps = self.count_steps(self.hdf_input)
self.ids = self.get_ids(self.hdf_input)
self.convert()
self._close_files()
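# Minimal usage sketch (illustrative; assumes a config file that provides the
# 'routing_model' input/output paths read in __init__):
#
#   converter = Convert('crosswater_config.ini')
#   converter.run()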
| gpl-3.0 |
SDK/metadatachecker | sacm/utils.py | 1 | 14423 | __author__ = 'sdk'
from time import strftime, gmtime, mktime
import datetime
from xml.dom import minidom
import numpy as np
import cx_Oracle
from password import databaseSCO as database
import pandas as pd
pd.options.mode.chained_assignment = None
tables = {"ASDM": "XML_ASDM_ENTITIES", "Main": "XML_MAINTABLE_ENTITIES",
"AlmaRadiometer": "XML_ALMARADIOMETERTAB_ENTITIES", "Antenna": "XML_ANTENNATABLE_ENTITIES",
"CalAmpli": "XML_CALAMPLITABLE_ENTITIES", "CalAtmosphere": "XML_CALATMOSPHERETABL_ENTITIES",
"CalCurve": "XML_CALCURVETABLE_ENTITIES", "CalSeeing": "XML_CALSEEINGTABLE_ENTITIES",
"CalWVR": "XML_CALWVRTABLE_ENTITIES", "CalData": "XML_CALDATATABLE_ENTITIES",
"CalDelay": "XML_CALDELAYTABLE_ENTITIES", "CalDevice": "XML_CALDEVICETABLE_ENTITIES",
"CalFlux": "XML_CALFLUXTABLE_ENTITIES", "CalPhase": "XML_CALPHASETABLE_ENTITIES",
"CalReduction": "XML_CALREDUCTIONTABLE_ENTITIES", "ConfigDescription": "XML_CONFIGDESCRIPTION_ENTITIES",
"CorrelatorMode": "XML_CORRELATORMODETAB_ENTITIES", "DataDescription": "XML_DATADESCRIPTIONTA_ENTITIES",
"ExecBlock": "XML_EXECBLOCKTABLE_ENTITIES", "Feed": "XML_FEEDTABLE_ENTITIES",
"Annotation": "XML_ANNOTATIONTABLE_ENTITIES", "Ephemeris": "XML_EPHEMERISTABLE_ENTITIES",
"Anotation": "XML_ANNOTATIONTABLE_ENTITIES", "CalBandpass": "XML_CALBANDPASSTABLE_ENTITIES",
"CalPointing": "XML_CALPOINTINGTABLE_ENTITIES", "Field": "XML_FIELDTABLE_ENTITIES",
"Flag": "XML_FLAGTABLE_ENTITIES", "Focus": "XML_FOCUSTABLE_ENTITIES",
"FocusModel": "XML_FOCUSMODELTABLE_ENTITIES", "Pointing": "XML_POINTINGTABLE_ENTITIES",
"PointingModel": "XML_POINTINGMODELTABL_ENTITIES", "Polarization": "XML_POLARIZATIONTABLE_ENTITIES",
"Processor": "XML_PROCESSORTABLE_ENTITIES", "Receiver": "XML_RECEIVERTABLE_ENTITIES",
"SBSummary": "XML_SBSUMMARYTABLE_ENTITIES", "Scan": "XML_SCANTABLE_ENTITIES",
"Source": "XML_SOURCETABLE_ENTITIES", "SpectralWindow": "XML_SPECTRALWINDOWTAB_ENTITIES",
"State": "XML_STATETABLE_ENTITIES", "Station": "XML_STATIONTABLE_ENTITIES", "Subscan": "XML_SUBSCANTABLE_ENTITIES",
"SquareLawDetector": "XML_SQUARELAWDETECTOR_ENTITIES", "SwitchCycle": "XML_SWITCHCYCLETABLE_ENTITIES",
"SysCal": "XML_SYSCALTABLE_ENTITIES", "Weather": "XML_WEATHERTABLE_ENTITIES",
"SchedBlock":"XML_SCHEDBLOCK_ENTITIES", "ObsProject":"XML_OBSPROJECT_ENTITIES"}
def sdmTimeString(number=None):
"""
Convert a time value (as used by ASDM, i.e. MJD in nanoseconds) into a FITS type string.
:param number:
"""
st = number/1000000000L
# decimal microseconds ...
number = (number-st*1000000000L)/1000
# number of seconds since 1970-01-01T00:00:00
st = st-3506716800L
return strftime("%Y-%m-%dT%H:%M:%S", gmtime(st))+(".%6.6d" % number)
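# Worked example (illustrative): the offset 3506716800 s above is the gap
# between the MJD epoch (1858-11-17) and the Unix epoch, so
#   sdmTimeString(3506716800000000000) -> '1970-01-01T00:00:00.000000'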
def gtm(t=None):
"""
    Convert a time value (as used by ASDM, i.e. MJD in nanoseconds) into
    seconds since the Unix epoch (1970-01-01T00:00:00).
    :param t: time in MJD nanoseconds
"""
st = t-3506716800000000000L
return st/1000000000L
def gtm2(number=None):
"""
    Convert a time value (as used by ASDM, i.e. MJD in nanoseconds) into a
    datetime.datetime with microsecond precision.
    :param number: time in MJD nanoseconds
"""
st = number/1000000000L
# decimal microseconds ...
number = (number-st*1000000000L)/1000
# number of seconds since 1970-01-01T00:00:00
st = st-3506716800L
return datetime.datetime.fromtimestamp(mktime(gmtime(st))).replace(microsecond=(number))
def returnMAXPWVC(pwv=None):
if pwv <= 0.472:
return 0.472
elif pwv <= 0.658:
return 0.658
elif pwv <= 0.913:
return 0.913
elif pwv <= 1.262:
return 1.262
elif pwv <= 1.796:
return 1.796
elif pwv <= 2.748:
return 2.748
else:
return 5.186
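# Behaviour note (illustrative): the value is rounded up to the next entry in
# the fixed list of PWV thresholds, e.g. returnMAXPWVC(1.0) returns 1.262 and
# anything above 2.748 falls through to the catch-all 5.186.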
def findChannel(start=None, width=None, repFreq=None, nchan=None):
channel = 0
if width < 0:
for i in xrange(nchan):
if start > repFreq:
start = start + width
else:
channel = -1.*i
break
else:
for i in xrange(nchan):
if start < repFreq:
start = start + width
else:
channel = i
break
return channel
def RadianTo(num=None, unit=None):
"""
    Convert an angle in radians to a sexagesimal string.
    :param num: angle in radians
    :param unit: 'dms' for signed degrees:arcminutes:arcseconds or
                 'hms' for hours:minutes:seconds
    :return: formatted string
"""
Deg = float(num)*180.0/np.pi
if unit == 'dms':
if Deg < 0:
Deg = -Deg
sign = '-'
else:
sign = '+'
g = int(Deg)
m = int((Deg-g)*60.)
s = (Deg-g-m/60.)*3600.
return sign+str(g)+":"+str(m)+":"+str('%5.5f' % s)
if unit == 'hms':
h = int(Deg/15.)
m = int((Deg/15.-h)*60.)
s = (Deg/15.-h-m/60.)*3600.
return str(h)+":"+str(m)+":"+str('%5.5f' % s)
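# Usage note (illustrative): 'dms' produces strings of the form
# '<sign><deg>:<arcmin>:<arcsec>' (e.g. '+12:34:56.00000') and 'hms' produces
# '<hours>:<min>:<sec>'; minutes and seconds are not zero-padded.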
def arrayParser(line=None, dimensions=None, castFloat=False):
"""
    :param line: string to be parsed
    :param dimensions: dimensions of the array (1 or 2)
    :return: a list (1D) or a list of lists (2D); no support for 3D arrays yet
"""
result = list()
line = line.strip()
if dimensions == 1:
elements = line.split(' ')[1]
splits = line.split(' ')[2:]
for i in splits:
result.append(float(i)) if castFloat else result.append(i)
if int(elements) == len(result):
return result
else:
return False
if dimensions == 2:
rows = int(line.split(' ')[1])
columns = int(line.split(' ')[2])
splits = line.split(' ')[3:]
for j in range(0, rows):
temp = list()
for i in range(0, columns):
temp.append(float(splits[i+(j*columns)])) if castFloat else temp.append(splits[i+(j*columns)])
result.append(temp)
return result
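# Examples of the ASDM-style value strings this parser expects (illustrative;
# the leading token, presumably the number of dimensions, is skipped):
#   arrayParser('1 3 a b c', 1)                   -> ['a', 'b', 'c']
#   arrayParser('2 2 2 1.0 2.0 3.0 4.0', 2, True) -> [[1.0, 2.0], [3.0, 4.0]]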
def GetXML(archiveUID=None,table=None):
"""
:param archiveUID: Archive UID
:param table: Table
:return: XML String
"""
sqlXML = "select XMLType.GetClobVal(xml) from ALMA.XXXXYYY where archive_uid='ZZZZCCCC' "
sqlXML = sqlXML.replace('XXXXYYY',tables[table]).replace('ZZZZCCCC',archiveUID)
try:
orcl = cx_Oracle.connect(database)
cursorXML = orcl.cursor()
cursorXML.execute(sqlXML)
XMLTable = cursorXML.fetchone()
return XMLTable[0].read()
except Exception as e:
print e
return False
return False
def getProjectUID(projectCode=None):
"""
:param projectCode:
:return:
"""
sql = "select prj_archive_uid from ALMA.BMMV_OBSPROJECT where prj_code = 'XXXYYY'"
sql = sql.replace('XXXYYY',projectCode)
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
data = cursor.fetchall()
orcl.close()
return data[0][0]
except Exception as e:
print e
def getSBMOUS():
sql = "select DOMAIN_ENTITY_ID, PARENT_OBS_UNIT_SET_STATUS_ID from ALMA.SCHED_BLOCK_STATUS"
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
data = cursor.fetchall()
status = list()
for i in data:
status.append((i[0],i[1]))
orcl.close()
return status
except Exception as e:
print e
def getSBNames():
sql = "select archive_uid, sb_name from ALMA.BMMV_SCHEDBLOCK"
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
data = cursor.fetchall()
sbnames = list()
for i in data:
sbnames.append((i[0],i[1]))
orcl.close()
return sbnames
except Exception as e:
print e
def getProjectCodes(cycle=2):
cycle_code = dict()
cycle_code[0] = '2011._.%._'
cycle_code[1] = '2012._.%._'
cycle_code[2] = '2013._.%._'
cycle_code[3] = '2015._.%._'
sql = '''select al2.PRJ_ARCHIVE_UID, al2.code
from ALMA.OBS_PROJECT_STATUS al1,
ALMA.BMMV_OBSPROJECT al2
where al1.obs_project_id in (select prj_archive_uid from ALMA.BMMV_OBSPROJECT where prj_code like 'XXXYYYZZZ')
and al1.domain_entity_state in ('Ready', 'Canceled', 'InProgress', 'Broken','Completed', 'Repaired','Phase2Submitted')
and al1.OBS_PROJECT_ID = al2.PRJ_ARCHIVE_UID '''
sql = sql.replace('XXXYYYZZZ',cycle_code[int(cycle)])
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
data = cursor.fetchall()
codes = list()
for i in data:
codes.append((i[0],i[1]))
orcl.close()
return codes
except Exception as e:
print e
def getSBs(prj_uid=None):
sql = '''with t1 as (
select status_entity_id as seid1 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ' and PARENT_OBS_UNIT_SET_STATUS_ID is null
),
t2 as (
select status_entity_id as seid2, PARENT_OBS_UNIT_SET_STATUS_ID as paid2, domain_entity_id from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
),
t3 as (
select status_entity_id as seid3, PARENT_OBS_UNIT_SET_STATUS_ID as paid3 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
),
t4 as (
select status_entity_id as seid4, PARENT_OBS_UNIT_SET_STATUS_ID as paid4 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
),
t5 as (
select domain_entity_id as scheckblock_uid, PARENT_OBS_UNIT_SET_STATUS_ID as paid5 from ALMA.SCHED_BLOCK_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
)
SELECT t2.domain_entity_id, t5.scheckblock_uid
FROM t1,
t2,
t3,
t4,
t5
WHERE t1.seid1 = t2.paid2
AND t2.seid2 = t3.paid3
AND t3.seid3 = t4.paid4
AND t4.seid4 = t5.paid5
ORDER BY 1 ASC'''
sql = sql.replace('PPPRRRJJJ', prj_uid)
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
sb = cursor.fetchall()
orcl.close()
return sb
except Exception as e:
orcl.close()
print e
def spectrals_sb(prj_uid=None, partid=None):
sql = '''with t1 as (
select status_entity_id as seid1 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ' and PARENT_OBS_UNIT_SET_STATUS_ID is null
),
t2 as (
select status_entity_id as seid2, PARENT_OBS_UNIT_SET_STATUS_ID as paid2, domain_entity_id from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
),
t3 as (
select status_entity_id as seid3, PARENT_OBS_UNIT_SET_STATUS_ID as paid3 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
),
t4 as (
select status_entity_id as seid4, PARENT_OBS_UNIT_SET_STATUS_ID as paid4 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
),
t5 as (
select domain_entity_id as scheckblock_uid, PARENT_OBS_UNIT_SET_STATUS_ID as paid5 from ALMA.SCHED_BLOCK_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
)
SELECT t2.domain_entity_id, t5.scheckblock_uid
FROM t1,
t2,
t3,
t4,
t5
WHERE t1.seid1 = t2.paid2
AND t2.seid2 = t3.paid3
AND t3.seid3 = t4.paid4
AND t4.seid4 = t5.paid5
AND t2.domain_entity_id = 'ZZZXXXYYY'
ORDER BY 1 ASC'''
sql = sql.replace('PPPRRRJJJ', prj_uid).replace('ZZZXXXYYY', partid)
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
sb = cursor.fetchall()
specscan = list()
for i in sb:
specscan.append((prj_uid,i[1],'SpectralScan'))
return specscan
except Exception as e:
print e
def is_spectralscan(prj_uid=None):
sql = '''select al1.archive_uid, x.*
from
ALMA.XML_OBSPROJECT_ENTITIES al1,
XMLTable('for $first in /*:ObsProject/*:ObsProgram/*:ScienceGoal return element i {
element pol { data($first/*:SpectralSetupParameters/@polarisation)},
element type { data($first/*:SpectralSetupParameters/@type)},
element partid { data($first/*:ObsUnitSetRef/@partId)}
}'
PASSING al1.XML COLUMNS
pol varchar2(50) PATH 'pol',
type varchar2(32) PATH 'type',
partid varchar2(20) PATH 'partid'
) x
where al1. archive_uid = 'XXXXYYYY'
order by al1.timestamp desc'''
sql = sql.replace('XXXXYYYY', prj_uid)
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
science_goals = cursor.fetchall()
cursor.close()
return science_goals
except Exception as e:
print e
def is_band89(prj_uid=None):
sql = '''with t1 as (
select status_entity_id as seid1 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'XXXXYYYYZZZZ' and PARENT_OBS_UNIT_SET_STATUS_ID is null
),
t2 as (
select status_entity_id as seid2, PARENT_OBS_UNIT_SET_STATUS_ID as paid2, domain_entity_id from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'XXXXYYYYZZZZ'
),
t3 as (
select status_entity_id as seid3, PARENT_OBS_UNIT_SET_STATUS_ID as paid3 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'XXXXYYYYZZZZ'
),
t4 as (
select status_entity_id as seid4, PARENT_OBS_UNIT_SET_STATUS_ID as paid4 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'XXXXYYYYZZZZ'
),
t5 as (
select domain_entity_id as schedblock_uid, PARENT_OBS_UNIT_SET_STATUS_ID as paid5 from ALMA.SCHED_BLOCK_STATUS where OBS_PROJECT_ID = 'XXXXYYYYZZZZ'
),
t6 as (
select archive_uid as sb_uid, receiver_band as band from ALMA.BMMV_SCHEDBLOCK where prj_ref = 'XXXXYYYYZZZZ'
)
SELECT t2.domain_entity_id, t5.schedblock_uid,t6.band
FROM t1,
t2,
t3,
t4,
t5,
t6
WHERE t1.seid1 = t2.paid2
AND t2.seid2 = t3.paid3
AND t3.seid3 = t4.paid4
AND t4.seid4 = t5.paid5
and t6.sb_uid = t5.schedblock_uid
ORDER BY 1 ASC'''
sql = sql.replace('XXXXYYYYZZZZ',prj_uid)
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
sb = cursor.fetchall()
cursor.close()
return sb
except Exception as e:
print e
| mit |
HolgerPeters/scikit-learn | sklearn/decomposition/__init__.py | 66 | 1433 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e191.py | 2 | 6647 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
178
refactored cost func (functionally equiv to 177)
0.1x learning rate
e180
* mse
e181
* back to scaled cost
* different architecture:
- convd1 at input (2x)
- then 3 LSTM layers, each with a 2x conv in between
- no diff input
e189
* divide dominant appliance power
* mse
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
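# Hedged sketch (illustration only, never called): compile scaled_cost with Theano
# and evaluate it on a toy batch. With 2 "on" samples out of 16, plain MSE would be
# 2/16 = 0.125, but scaled_cost averages the above- and below-threshold means,
# giving (1.0 + 0.0) / 2 = 0.5, so rare "on" targets are not drowned out.
def _scaled_cost_demo():
    import numpy as np
    import theano
    x = T.matrix('x')
    t = T.matrix('t')
    f = theano.function([x, t], scaled_cost(x, t), allow_input_downcast=True)
    target = np.zeros((2, 8), dtype=np.float32)
    target[0, :2] = 1.0                            # only 2 of 16 samples are "on"
    output = np.zeros((2, 8), dtype=np.float32)    # network predicts "off" everywhere
    return f(output, target)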
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[0.5, 0.5, 2, 20, 20],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1520,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
input_padding=1,
include_diff=False,
clip_appliance_power=False
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=1000,
loss_function=mse,
updates=partial(nesterov_momentum, learning_rate=.00001, clip_range=(-1, 1)),
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 10,
'filter_length': 2,
'stride': 1,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Uniform(1)
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
        except Exception as exception:
            print("EXCEPTION:", exception)
            raise
            # optional debugging hook (unreachable while the raise above is active):
            # import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
| mit |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/matplotlib/backends/backend_gtk.py | 10 | 37753 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os, sys, warnings
def fn_name(): return sys._getframe(1).f_code.co_name
if six.PY3:
warnings.warn(
"The gtk* backends have not been tested with Python 3.x",
ImportWarning)
try:
import gobject
import gtk; gdk = gtk.gdk
import pango
except ImportError:
raise ImportError("Gtk* backend requires pygtk to be installed.")
pygtk_version_required = (2,4,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
_new_tooltip_api = (gtk.pygtk_version[1] >= 12)
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib.cbook import warn_deprecated
from matplotlib import (
cbook, colors as mcolors, lines, markers, rcParams, verbose)
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
# Hide the benign warning that it can't stat a file that doesn't exist
warnings.filterwarnings('ignore', '.*Unable to retrieve the file info for.*', gtk.Warning)
cursord = {
cursors.MOVE : gdk.Cursor(gdk.FLEUR),
cursors.HAND : gdk.Cursor(gdk.HAND2),
cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
}
# ref gtk+/gtk/gtkwidget.h
def GTK_WIDGET_DRAWABLE(w):
    flags = w.flags()
return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
if gtk.main_level() == 0:
gtk.main()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK(figure)
manager = FigureManagerGTK(canvas, num)
return manager
class TimerGTK(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses GTK for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = gobject.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
gobject.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if len(self.callbacks) > 0 and not self._single:
return True
else:
self._timer = None
return False
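# Hedged usage sketch (illustration only, never called by the backend): create a
# periodic timer from a canvas via new_timer() defined further down; add_callback()
# and start() come from backend_bases.TimerBase, as described in the TimerGTK
# docstring above.
def _timer_usage_sketch(canvas, interval=1000):
    def tick():
        print('timer fired')
    timer = canvas.new_timer(interval=interval)   # interval in milliseconds
    timer.add_callback(tick)
    timer.start()
    return timer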
class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
65511 : 'super',
65512 : 'super',
65406 : 'alt',
65289 : 'tab',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.EXPOSURE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if self.__class__ == matplotlib.backends.backend_gtk.FigureCanvasGTK:
warn_deprecated('2.0', message="The GTK backend is "
"deprecated. It is untested, known to be "
"broken and will be removed in Matplotlib 2.2. "
"Use the GTKAgg backend instead. "
"See Matplotlib usage FAQ for"
" more info on backends.",
alternative="GTKAgg")
if _debug: print('FigureCanvasGTK.%s' % fn_name())
FigureCanvasBase.__init__(self, figure)
gtk.DrawingArea.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._pixmap_width = -1
self._pixmap_height = -1
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('expose_event', self.expose_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(False)
self.set_flags(gtk.CAN_FOCUS)
self._renderer_init()
self.last_downclick = {}
def destroy(self):
#gtk.DrawingArea.destroy(self)
self.close_event()
if self._idle_draw_id != 0:
gobject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
if event.direction==gdk.SCROLL_UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
dblclick = (event.type == gdk._2BUTTON_PRESS)
if not dblclick:
# GTK is the only backend that generates a DOWN-UP-DOWN-DBLCLICK-UP event
# sequence for a double click. All other backends have a DOWN-UP-DBLCLICK-UP
# sequence. In order to provide consistency to matplotlib users, we will
# eat the extra DOWN event in the case that we detect it is part of a double
# click.
# first, get the double click time in milliseconds.
current_time = event.get_time()
last_time = self.last_downclick.get(event.button,0)
dblclick_time = gtk.settings_get_for_screen(gdk.screen_get_default()).get_property('gtk-double-click-time')
delta_time = current_time-last_time
if delta_time < dblclick_time:
del self.last_downclick[event.button] # we do not want to eat more than one event.
return False # eat.
self.last_downclick[event.button] = current_time
FigureCanvasBase.button_press_event(self, x, y, event.button, dblclick=dblclick, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
key = self._get_key(event)
if _debug: print("hit", key)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return True # stop event propagation
def key_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
key = self._get_key(event)
if _debug: print("release", key)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return True # stop event propagation
def motion_notify_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
# flipy so y=0 is bottom of canvas
y = self.allocation.height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
x, y, state = event.window.get_pointer()
FigureCanvasBase.enter_notify_event(self, event, xy=(x, y))
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
for key_mask, prefix in (
[gdk.MOD4_MASK, 'super'],
[gdk.MOD1_MASK, 'alt'],
[gdk.CONTROL_MASK, 'ctrl'], ):
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if widget.window is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches(w/dpi, h/dpi, forward=False)
self._need_redraw = True
return False # finish event propagation?
def draw(self):
# Note: FigureCanvasBase.draw() is inconveniently named as it clashes
# with the deprecated gtk.Widget.draw()
self._need_redraw = True
if GTK_WIDGET_DRAWABLE(self):
self.queue_draw()
# do a synchronous draw (its less efficient than an async draw,
# but is required if/when animation is used)
self.window.process_updates (False)
def draw_idle(self):
if self._idle_draw_id != 0:
return
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
self._idle_draw_id = gobject.idle_add(idle_draw)
def _renderer_init(self):
"""Override by GTK backends to select a different renderer
Renderer should provide the methods:
set_pixmap ()
set_width_height ()
that are used by
_render_figure() / _pixmap_prepare()
"""
self._renderer = RendererGDK (self, self.figure.dpi)
def _pixmap_prepare(self, width, height):
"""
Make sure _._pixmap is at least width, height,
create new pixmap if necessary
"""
if _debug: print('FigureCanvasGTK.%s' % fn_name())
create_pixmap = False
if width > self._pixmap_width:
# increase the pixmap in 10%+ (rather than 1 pixel) steps
self._pixmap_width = max (int (self._pixmap_width * 1.1),
width)
create_pixmap = True
if height > self._pixmap_height:
self._pixmap_height = max (int (self._pixmap_height * 1.1),
height)
create_pixmap = True
if create_pixmap:
self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
self._pixmap_height)
self._renderer.set_pixmap (self._pixmap)
def _render_figure(self, pixmap, width, height):
"""used by GTK and GTKcairo. GTKAgg overrides
"""
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def expose_event(self, widget, event):
"""Expose_event for all GTK backends. Should not be overridden.
"""
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if GTK_WIDGET_DRAWABLE(self):
if self._need_redraw:
x, y, w, h = self.allocation
self._pixmap_prepare (w, h)
self._render_figure(self._pixmap, w, h)
self._need_redraw = False
x, y, w, h = event.area
self.window.draw_drawable (self.style.fg_gc[self.state],
self._pixmap, x, y, x, y, w, h)
return False # finish event propagation?
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
filetypes['png'] = 'Portable Network Graphics'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format, *args, **kwargs):
if self.flags() & gtk.REALIZED == 0:
            # realize the widget so that self.window exists (it is needed for the
            # pixmap); this has a side effect of altering figure width,height
            # (via configure-event?)
gtk.DrawingArea.realize(self)
width, height = self.get_width_height()
pixmap = gdk.Pixmap (self.window, width, height)
self._renderer.set_pixmap (pixmap)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
# set the default quality, if we are writing a JPEG.
# http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html#method-gdkpixbuf--save
options = cbook.restrict_dict(kwargs, ['quality'])
if format in ['jpg','jpeg']:
if 'quality' not in options:
options['quality'] = rcParams['savefig.jpeg_quality']
options['quality'] = str(options['quality'])
if is_string_like(filename):
try:
pixbuf.save(filename, format, options=options)
except gobject.GError as exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
elif is_writable_file_like(filename):
if hasattr(pixbuf, 'save_to_callback'):
def save_callback(buf, data=None):
data.write(buf)
try:
pixbuf.save_to_callback(save_callback, format, user_data=filename, options=options)
except gobject.GError as exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
else:
raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
else:
raise ValueError("filename must be a path or a file-like object")
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerGTK(*args, **kwargs)
def flush_events(self):
gtk.gdk.threads_enter()
while gtk.events_pending():
gtk.main_iteration(True)
gtk.gdk.flush()
gtk.gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The gtk.Toolbar (gtk only)
vbox : The gtk.VBox containing the canvas and toolbar (gtk only)
window : The gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print('FigureManagerGTK.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.window = gtk.Window()
self.set_window_title("Figure %d" % num)
if (window_icon):
try:
self.window.set_icon_from_file(window_icon)
except:
# some versions of gtk throw a glib.GError but not
# all, so I am not sure how to catch it. I am unhappy
                # doing a blanket catch here, but am not sure what a
# better way is - JDH
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
self.vbox = gtk.VBox()
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False)
tb_w, tb_h = self.toolbar.size_request()
h += tb_h
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print('FigureManagerGTK.%s' % fn_name())
if hasattr(self, 'toolbar') and self.toolbar is not None:
self.toolbar.destroy()
if hasattr(self, 'vbox'):
self.vbox.destroy()
if hasattr(self, 'window'):
self.window.destroy()
if hasattr(self, 'canvas'):
self.canvas.destroy()
self.__dict__.clear() #Is this needed? Other backends don't have it.
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
gtk.main_level() >= 1:
gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle(self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK (canvas, self.window)
else:
toolbar = None
return toolbar
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
gtk.Toolbar.__init__(self)
NavigationToolbar2.__init__(self, canvas)
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.window.set_cursor(cursord[cursor])
def release(self, event):
try: del self._pixmapBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
drawable = self.canvas.window
if drawable is None:
return
gc = drawable.new_gc()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val)for val in (min(x0,x1), min(y0, y1), w, h)]
try:
lastrect, pixmapBack = self._pixmapBack
except AttributeError:
#snap image back
if event.inaxes is None:
return
ax = event.inaxes
l,b,w,h = [int(val) for val in ax.bbox.bounds]
b = int(height)-(b+h)
axrect = l,b,w,h
self._pixmapBack = axrect, gtk.gdk.Pixmap(drawable, w, h)
self._pixmapBack[1].draw_drawable(gc, drawable, l, b, 0, 0, w, h)
else:
drawable.draw_drawable(gc, pixmapBack, 0, 0, *lastrect)
drawable.draw_rectangle(gc, False, *rect)
def _init_toolbar(self):
self.set_style(gtk.TOOLBAR_ICONS)
self._init_toolbar2_4()
def _init_toolbar2_4(self):
basedir = os.path.join(rcParams['datapath'],'images')
if not _new_tooltip_api:
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file + '.png')
image = gtk.Image()
image.set_from_file(fname)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
if _new_tooltip_api:
tbutton.set_tooltip_text(tooltip_text)
else:
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
toolitem = gtk.SeparatorToolItem()
self.insert(toolitem, -1)
# set_draw() not making separator invisible,
# bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = gtk.ToolItem()
self.insert(toolitem, -1)
self.message = gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
path=os.path.expanduser(rcParams.get('savefig.directory', '')),
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
chooser = self.get_filechooser()
fname, format = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(rcParams.get('savefig.directory', ''))
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))
try:
self.canvas.print_figure(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = gtk.Window()
if (window_icon):
try: window.set_icon_from_file(window_icon)
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = gtk.VBox()
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True)
window.show()
def _get_canvas(self, fig):
return FigureCanvasGTK(fig)
class FileChooserDialog(gtk.FileChooserDialog):
"""GTK+ 2.4 file selector which presents the user with a menu
of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK),
path = None,
filetypes = [],
default_filetype = None
):
super(FileChooserDialog, self).__init__ (title, parent, action,
buttons)
super(FileChooserDialog, self).set_do_overwrite_confirmation(True)
self.set_default_response (gtk.RESPONSE_OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = gtk.HBox (spacing=10)
hbox.pack_start (gtk.Label ("File Format:"), expand=False)
liststore = gtk.ListStore(gobject.TYPE_STRING)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start (cbox)
self.filetypes = filetypes
self.sorted_filetypes = list(six.iteritems(filetypes))
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
cbox.append_text ("%s (*.%s)" % (name, ext))
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(gtk.RESPONSE_OK):
break
filename = self.get_filename()
break
return filename, self.ext
class DialogLineprops(object):
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in markers.MarkerStyle.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
        'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
        'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
rgba = mcolors.to_rgba(line.get_color())
color = gtk.gdk.Color(*[int(val*65535) for val in rgba[:3]])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
rgba = mcolors.to_rgba(line.get_markerfacecolor())
color = gtk.gdk.Color(*[int(val*65535) for val in rgba[:3]])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
        'called when the marker face color button is set'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
# versions of pygtk, so we have to use a PNG file instead.
try:
if gtk.pygtk_version < (2, 8, 0) or sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(rcParams['datapath'], 'images', icon_filename)
except:
window_icon = None
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel gtk.Window
parent = parent.get_toplevel()
if parent.flags() & gtk.TOPLEVEL == 0:
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = gtk.MessageDialog(
parent = parent,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureCanvas = FigureCanvasGTK
FigureManager = FigureManagerGTK
| apache-2.0 |
larsmans/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 43 | 3343 | """
==========================
FastICA on 2D point clouds
==========================
This example visually illustrates, in the feature space, a comparison of
the results obtained with two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student's t-distributed signals with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', linewidths=0, zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
gaurav-kaushik/BostonBikr | app/reTrip.py | 1 | 10980 | """
reTrip will take text files of TripAdvisor data and turn them into Geocoded locations to transpose onto the map.
This uses a rather hacky text-based method because of time constraints. A full-featured web crawler that could be used for many pages or other sites at once would be much better. But since we only collect data once, this works OK for now.
Created on Wed Jul 22 09:47:51 2015
@author: gaurav
"""
import os
import re
import pickle
from math import sin, cos, sqrt, atan2, radians
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.vq import kmeans, vq
from pylab import plot
from BostonBikr import findNearestNodeNX, distanceCal, distanceCal4par
# Import your TripAdvisor data
try:
txt = open('bostontext.txt')
boston = txt.read()
except:
os.chdir('./anaconda/BostonBikr/')
txt = open('bostontext.txt')
boston = txt.read()
# Run that through regex
try:
reBoston = pickle.load(open("reBoston.p","rb"))
except:
# reBoston has the strings for each location
reBoston = re.findall("\n(.*?)\n#", boston)
print reBoston
reBoston = list(set(reBoston))
reBoston_original = reBoston
pickle.dump(reBoston, open("reBoston.p","wb"))
def GeoCode(address):
# take 'address' <type string> and get geocoordinates
import json
from urllib2 import urlopen
from urllib import quote
    # encode address query into URL (gAPI_key, a Google API key, is assumed to be defined elsewhere)
    url = 'https://maps.googleapis.com/maps/api/geocode/json?address={}&sensor=false&key={}'.format(quote(address), gAPI_key)
# call API and extract json
print 'Calling Google for the following address: ' + address
jData = urlopen(url).read()
jData = json.loads(jData.decode('utf-8')) # THIS MIGHT THROW AN ERROR
# extract coordinates (latitude, longitude)
if jData.get('status') == 'ZERO_RESULTS':
latitude, longitude = None, None
print 'The following address was not found: ' + address
else:
try:
latitude, longitude = (value for _, value in sorted(jData.get('results')[0].get('geometry').get('location').items()))
print 'Your location is at the following coordinates: {:f}, {:f}'.format(longitude, latitude)
except IndexError:
latitude, longitude = None, None
return (longitude, latitude)
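# Hedged usage sketch (never called in this script; the address is hypothetical and
# a network connection plus a valid gAPI_key are required):
def _geocode_example():
    lng, lat = GeoCode("Fenway Park, Boston, MA")
    print 'Fenway Park is near (%.4f, %.4f)' % (lng, lat)   # roughly (-71.0972, 42.3467)
    return lng, lat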
# Define map boundary
bounds = [[42.33289, -71.173794], [42.420644, -71.040413]]
# Put it down flip it and reverse it
bounds = [[y,x] for [x,y] in bounds]
minLong = bounds[0][0]
maxLong = bounds[1][0]
minLat = bounds[0][1]  # latitude of the south-west corner
maxLat = bounds[1][1]
# Get our variables (or generate if not found)
try:
reBostonGeoBound = pickle.load(open("reBostonGeoBound.p","rb"))
reBostonGeoLoc = pickle.load(open("reBostonLoc.p","rb"))
except:
# Use Geocode to transform into coordinates
reBostonGeo = []
reBostonLoc = []
for location in reBoston:
reBostonGeo.append(GeoCode(location))
reBostonGeo_original = reBostonGeo
# Use the boundaries to discard locations outside your matrix
reBostonGeoBound = []
for index, location in enumerate(reBostonGeo):
if (minLong <= location[0] < maxLong) and (minLat <= location[1] <= maxLat):
reBostonGeoBound.append(location)
reBostonLoc.append(reBoston[index])
# Call our giant Boston map and the nodes
bostonGraphNX = pickle.load(open("/home/gaurav/anaconda/BostonBikr/app/static/bostonMetroArea.p", "rb"))
bostonGraphNXPos = pickle.load(open("/home/gaurav/anaconda/BostonBikr/app/static/bostonMetroArea_pos.p", "rb"))
## Reassign nodes to proximal node in bostonGraphNX
for idx, node in enumerate(reBostonGeoBound):
_, newNode = findNearestNodeNX(bostonGraphNX, node)
reBostonGeoBound[idx] = newNode
# Dump these pickles once you have them transposed onto your graph
pickle.dump(reBostonGeoBound, open("reBostonGeoBound.p","wb"))
    pickle.dump(reBostonLoc, open("reBostonLoc.p","wb"))
    reBostonGeoLoc = reBostonLoc  # use the same name as the pickle-loading branch above
# Let's take these nodes and create a 2D Gaussian function
def bostonGauss(node, sigma=2355.0):
xNode, yNode = node[0], node[1]
metersPerLat = 82190.6
metersPerLng = 111230.0
# Transform your sigma into Lat and Long Space (m --> deg lat or lng)
# Note that sigmaX != sigmaY because real planets have curves
sigmaX = sigma/metersPerLng
sigmaY = sigma/metersPerLat
bounds = [[-71.173794, 42.33289], [-71.040413, 42.420644]]
xSpace = np.linspace(bounds[0][0], bounds[1][0], 1000)
ySpace = np.linspace(bounds[0][1], bounds[1][1], 1000)
x, y = np.meshgrid(xSpace, ySpace)
Z = (100/(2*np.pi*sigmaX*sigmaY)) * np.exp(-((x-xNode)**2/(2*sigmaX**2) + (y-yNode)**2/(2*sigmaY**2)))
Z = Z/np.max(Z)
return x, y, Z
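# Hedged sketch (never executed): evaluate the Gaussian bump for one hypothetical
# attraction and check that the surface peaks (roughly) at the attraction itself.
def _gauss_peak_example():
    node = (-71.0972, 42.3467)                    # hypothetical attraction (lng, lat)
    x, y, Z = bostonGauss(node, sigma=1500.0)
    i, j = np.unravel_index(np.argmax(Z), Z.shape)
    return x[i, j], y[i, j]                       # should be very close to node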
try:
Z_master = pickle.load(open("Z_master_final.p","rb"))
Z_centr = pickle.load(open("Z_centr_final.p","rb"))
centroids = pickle.load(open("centroids6_final.p","rb"))
except:
# Let's make that Gaussian map -- no bias for node clusters yet
sig = 1500.0
x, y, Z = bostonGauss(reBostonGeoBound[0], sigma=sig)
for locale in reBostonGeoBound[1:]:
Z = np.add(Z, bostonGauss(locale, sigma=sig)[2])
bostonLocaleGraph = nx.Graph()
bostonLocales = dict(zip(reBostonGeoBound,reBostonGeoBound))
bostonLocaleGraph.add_nodes_from(reBostonGeoBound)
fig = plt.figure(figsize=(24,24))
plt.contour(x,y,Z)
nx.draw(bostonLocaleGraph, pos=bostonLocales, node_size=100, node_color='g')
nx.draw(bostonGraphNX, pos=bostonGraphNXPos, node_size=1, node_color='g')
# K-means clustering!
### K = 6!
data = np.array(reBostonGeoBound)
centroids,_ = kmeans(data,6)
# assign each sample to a cluster
# WRAP THIS INTO ITS OWN FUNCTION
idx,_ = vq(data,centroids)
fig = plt.figure(figsize=(24,24))
ax = fig.add_subplot(111)
plot(data[idx==0,0],data[idx==0,1],'ob',
data[idx==1,0],data[idx==1,1],'or',
data[idx==2,0],data[idx==2,1],'og',
data[idx==3,0],data[idx==3,1],'oy',
data[idx==4,0],data[idx==4,1],'om',
data[idx==5,0],data[idx==5,1],'oc', markersize=20)
nx.draw(bostonGraphNX, pos=bostonGraphNXPos, node_size=1, node_color='k')
plot(centroids[:,0],centroids[:,1],'8k',markersize=30)
ax.plot()
# We can do TWO THINGS:
# 1. We can normalize each Gaussian by the cluster
# You get a more oblong Gaussian with possibility of local optima
# 2. Feed the Centroids into a Gaussian and convolve 6
# with Sigma scaled by # of nodes in cluster
# APPROACH 1 -- normalize per cluster and then add them all up
# First, let's get our data points per centroid
# These are all lists of tuples. Good job!
c0 = (zip(data[idx==0,0], data[idx==0,1]))
c1 = (zip(data[idx==1,0], data[idx==1,1]))
c2 = (zip(data[idx==2,0], data[idx==2,1]))
c3 = (zip(data[idx==3,0], data[idx==3,1]))
c4 = (zip(data[idx==4,0], data[idx==4,1]))
c5 = (zip(data[idx==5,0], data[idx==5,1]))
#c6 = (zip(data[idx==5,0], data[idx==5,1]))
c_list = [c0, c1, c2, c3, c4, c5]
# Now we find the Z values for each cluster
# Z_list[kcluster_index] will have len(k)
Z_list = []
x, y, _ = bostonGauss(c_list[0][0], sigma=sig)
for tup in c_list:
x, y, Z = bostonGauss(tup[0], sigma=sig)
Z = Z/np.max(Z)
for locale in tup[1:]:
# Z = np.divide(np.add(Z, bostonGauss(locale)[2], sigma=sig),len(tup))
Z = np.add(Z, bostonGauss(locale, sigma=sig)[2])
Z_list.append(Z/np.max(Z))
Z_list_normal = []
for item in Z_list:
item = item/np.max(item)
Z_list_normal.append(item)
Z_master = Z_list_normal[0]
for arr in Z_list_normal:
Z_master = np.add(Z_master, arr)
# APPROACH 2 -- fit our centroids
# 1. Feed the Centroids into a Gaussian and convolve 6
# with Sigma scaled by # of nodes in cluster
centroids_orig = centroids
centroids = list(tuple(map(tuple, centroids)))
x, y, Z_centr = bostonGauss(centroids[0], sigma=sig)
for locale in centroids[1:]:
newZ = bostonGauss(locale, sigma=sig)[2]
Z_centr = np.add(Z_centr, newZ)
plot_opt = 0
if plot_opt == 1:
#Plot this shizz if you want
x, y, _ = bostonGauss(reBostonGeoBound[0])
fig = plt.figure(1,figsize=(24,24))
ax1 = fig.add_subplot(111)
nx.draw(bostonGraphNX, pos=bostonGraphNXPos, node_size=1, node_color='k')
plot(centroids_orig[:,0],centroids_orig[:,1],'8k',markersize=30)
plt.contour(x,y,Z_master, linewidths=3)
ax1.plot()
fig2 = plt.figure(2,figsize=(24,24))
ax2 = fig2.add_subplot(111)
nx.draw(bostonGraphNX, pos=bostonGraphNXPos, node_size=1, node_color='k')
plot(centroids_orig[:,0],centroids_orig[:,1],'8k',markersize=30)
plt.contour(x,y,Z_centr, linewidths=3)
ax2.plot()
print "You're done processing!"
# NEATO! The plots are done and you have your topography map
# Now, you just need to assign a weight to each node
# in your original map "bostonGraphNX"
# To do this: iterate through bostonGraphNX.nodes
# Find the point in Z_master that minimizes delta(x,y)
# Make a dict of dicts: {node: {'weight': number}},
# where number is the Gaussian score at that node
x, y, _ = bostonGauss(reBostonGeoBound[0])
def findNearestIndex(node, x_vec = x[0], y_vec = y[0]):
# find the nearest (x,y) to get Z for a node
x_val = node[0]
y_val = node[1]
(k_x, v_x) = min(enumerate(x_vec), key=lambda x: abs(x[1]-x_val))
(k_y, v_y) = min(enumerate(y_vec), key=lambda x: abs(x[1]-y_val))
return (k_x, k_y)
def harmonicWeight(node1, node2, Z=Z_master):
x1, y1 = findNearestIndex(node1)[0], findNearestIndex(node1)[1]
x2, y2 = findNearestIndex(node2)[0], findNearestIndex(node2)[1]
Z1 = Z[x1, y1]
Z2 = Z[x2, y2]
# A = Z1 + Z2
# return A
H = 2*Z1*Z2/(Z1+Z2)
if H > 0:
return H
else:
return 0.0001
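# Hedged sketch of why the harmonic mean is used: if one endpoint of an edge
# scores high (Z1 = 0.9) but the other sits in a dead zone (Z2 = 0.1), then
# 2*Z1*Z2/(Z1+Z2) = 0.18 stays low, so the router is not lured across dead zones
# the way an arithmetic mean (0.5) would allow.
def _harmonic_mean_example(Z1=0.9, Z2=0.1):
    return 2.0 * Z1 * Z2 / (Z1 + Z2)   # -> 0.18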
# You will now create a new NetworkX Graph with the following:
# Each edge has a distance and weight
# The distance is distance (in m) between u and v
# The weight is the score given by your gaussian function above
# You can also add a 'location' tag to nodes so the user
# can see which attractions they will pass near on the route.
newNX = nx.Graph()
for edge in bostonGraphNX.edges(data=True):
# Get existing edge properties
u = edge[0]
v = edge[1]
if u in reBostonGeoBound:
nodeIndex = reBostonGeoBound.index(u)
nodeLoc = reBostonGeoLoc[nodeIndex]
elif v in reBostonGeoBound:
nodeIndex = reBostonGeoBound.index(v)
nodeLoc = reBostonGeoLoc[nodeIndex]
else:
nodeLoc = None
dist = edge[2]['weight']
newNX.add_edge(u,v, distance=dist, weight=harmonicWeight(u,v), location=nodeLoc)
## Save your weighted graph of Boston!
try:
pickle.dump(newNX, open("/home/gaurav/anaconda/BostonBikr/app/static/bostonMetroArea_Weighted_Locs.p", "wb"))
except:
print "This file already exists. Try saving as a different filename."
| mit |
ogeniz/programming | Python/dsp/thinkdsp/thinkdsp.py | 2 | 37046 | """This file contains code used in "Think DSP",
by Allen B. Downey, available from greenteapress.com
Copyright 2013 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import array
import copy
import math
import numpy
import random
import scipy
import scipy.stats
import scipy.fftpack
import struct
import subprocess
import thinkplot
import warnings
from fractions import gcd
from wave import open as open_wave
import matplotlib.pyplot as pyplot
try:
from IPython.display import Audio
except:
warnings.warn("Can't import Audio from IPython.display; "
"Wave.make_audio() will not work.")
PI2 = math.pi * 2
def random_seed(x):
"""Initialize the random and numpy.random generators.
x: int seed
"""
random.seed(x)
numpy.random.seed(x)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class WavFileWriter(object):
"""Writes wav files."""
def __init__(self, filename='sound.wav', framerate=11025):
"""Opens the file and sets parameters.
filename: string
framerate: samples per second
"""
self.filename = filename
self.framerate = framerate
self.nchannels = 1
self.sampwidth = 2
self.bits = self.sampwidth * 8
self.bound = 2**(self.bits-1) - 1
self.fmt = 'h'
self.dtype = numpy.int16
self.fp = open_wave(self.filename, 'w')
self.fp.setnchannels(self.nchannels)
self.fp.setsampwidth(self.sampwidth)
self.fp.setframerate(self.framerate)
def write(self, wave):
"""Writes a wave.
wave: Wave
"""
zs = wave.quantize(self.bound, self.dtype)
self.fp.writeframes(zs.tostring())
def close(self, duration=0):
"""Closes the file.
duration: how many seconds of silence to append
"""
if duration:
self.write(rest(duration))
self.fp.close()
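# Hedged usage sketch (illustration only, never called): write a one-second
# 440 Hz sine tone with WavFileWriter; Wave is defined further down this module.
def _wav_writer_example(filename='tone.wav', framerate=11025):
    ts = numpy.arange(framerate) / float(framerate)
    ys = 0.5 * numpy.sin(PI2 * 440 * ts)
    writer = WavFileWriter(filename, framerate)
    writer.write(Wave(ys, framerate))
    writer.close()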
def read_wave(filename='sound.wav'):
"""Reads a wave file.
filename: string
returns: Wave
"""
fp = open_wave(filename, 'r')
nchannels = fp.getnchannels()
nframes = fp.getnframes()
sampwidth = fp.getsampwidth()
framerate = fp.getframerate()
z_str = fp.readframes(nframes)
fp.close()
dtype_map = {1:numpy.int8, 2:numpy.int16, 3:'special', 4:numpy.int32}
if sampwidth not in dtype_map:
raise ValueError('sampwidth %d unknown' % sampwidth)
if sampwidth == 3:
xs = numpy.fromstring(z_str, dtype=numpy.int8).astype(numpy.int32)
ys = (xs[2::3] * 256 + xs[1::3]) * 256 + xs[0::3]
else:
ys = numpy.fromstring(z_str, dtype=dtype_map[sampwidth])
# if it's in stereo, just pull out the first channel
if nchannels == 2:
ys = ys[::2]
wave = Wave(ys, framerate)
return wave
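# Hedged usage sketch (assumes a file named 'sound.wav' exists on disk):
def _read_wave_example():
    wave = read_wave('sound.wav')
    print('%.2f seconds at %d frames per second' % (wave.duration, wave.framerate))
    return wave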
def play_wave(filename='sound.wav', player='aplay'):
"""Plays a wave file.
filename: string
player: string name of executable that plays wav files
"""
cmd = '%s %s' % (player, filename)
popen = subprocess.Popen(cmd, shell=True)
popen.communicate()
class _SpectrumParent(object):
"""Contains code common to Spectrum and DCT.
"""
def copy(self):
"""Makes a copy.
Returns: new Spectrum
"""
return copy.deepcopy(self)
@property
def max_freq(self):
return self.framerate / 2.0
@property
def freq_res(self):
return self.max_freq / (len(self.fs) - 1)
def plot(self, low=0, high=None, **options):
"""Plots amplitude vs frequency.
low: int index to start at
high: int index to end at
"""
thinkplot.plot(self.fs[low:high], self.amps[low:high], **options)
def plot_power(self, low=0, high=None, **options):
"""Plots power vs frequency.
low: int index to start at
high: int index to end at
"""
thinkplot.plot(self.fs[low:high], self.power[low:high], **options)
def estimate_slope(self):
"""Runs linear regression on log power vs log frequency.
returns: slope, inter, r2, p, stderr
"""
x = numpy.log(self.fs[1:])
y = numpy.log(self.power[1:])
t = scipy.stats.linregress(x,y)
return t
def peaks(self):
"""Finds the highest peaks and their frequencies.
returns: sorted list of (amplitude, frequency) pairs
"""
t = zip(self.amps, self.fs)
t.sort(reverse=True)
return t
class Spectrum(_SpectrumParent):
"""Represents the spectrum of a signal."""
def __init__(self, hs, framerate):
"""Initializes a spectrum.
hs: NumPy array of complex
framerate: frames per second
"""
self.hs = hs
self.framerate = framerate
# the frequency for each component of the spectrum depends
# on whether the length of the wave is even or odd.
# see http://docs.scipy.org/doc/numpy/reference/generated/
# numpy.fft.rfft.html
n = len(hs)
if n%2 == 0:
max_freq = self.max_freq
else:
max_freq = self.max_freq * (n-1) / n
self.fs = numpy.linspace(0, max_freq, n)
def __len__(self):
"""Length of the spectrum."""
return len(self.hs)
def __add__(self, other):
"""Adds two spectrums elementwise.
other: Spectrum
returns: new Spectrum
"""
if other == 0:
return self
assert self.framerate == other.framerate
hs = self.hs + other.hs
return Spectrum(hs, self.framerate)
__radd__ = __add__
def __mul__(self, other):
"""Multiplies two spectrums.
other: Spectrum
returns: new Spectrum
"""
# the spectrums have to have the same framerate and duration
assert self.framerate == other.framerate
assert len(self) == len(other)
hs = self.hs * other.hs
return Spectrum(hs, self.framerate)
@property
def real(self):
"""Returns the real part of the hs (read-only property)."""
return numpy.real(self.hs)
@property
def imag(self):
"""Returns the imaginary part of the hs (read-only property)."""
return numpy.imag(self.hs)
@property
def amps(self):
"""Returns a sequence of amplitudes (read-only property)."""
return numpy.absolute(self.hs)
@property
def power(self):
"""Returns a sequence of powers (read-only property)."""
return self.amps ** 2
def low_pass(self, cutoff, factor=0):
"""Attenuate frequencies above the cutoff.
cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if self.fs[i] > cutoff:
self.hs[i] *= factor
def high_pass(self, cutoff, factor=0):
"""Attenuate frequencies below the cutoff.
cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if self.fs[i] < cutoff:
self.hs[i] *= factor
def band_stop(self, low_cutoff, high_cutoff, factor=0):
"""Attenuate frequencies between the cutoffs.
low_cutoff: frequency in Hz
high_cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if low_cutoff < self.fs[i] < high_cutoff:
self.hs[i] *= factor
def pink_filter(self, beta=1):
"""Apply a filter that would make white noise pink.
beta: exponent of the pink noise
"""
denom = self.fs ** (beta/2.0)
denom[0] = 1
self.hs /= denom
def differentiate(self):
"""Apply the differentiation filter.
"""
i = complex(0, 1)
filtr = PI2 * i * self.fs
self.hs *= filtr
def angles(self):
"""Computes phase angles in radians.
returns: list of phase angles
"""
return numpy.angle(self.hs)
def make_integrated_spectrum(self):
"""Makes an integrated spectrum.
"""
cs = numpy.cumsum(self.power)
cs /= cs[-1]
return IntegratedSpectrum(cs, self.fs)
def make_wave(self):
"""Transforms to the time domain.
returns: Wave
"""
ys = numpy.fft.irfft(self.hs)
return Wave(ys, self.framerate)
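# Hedged sketch (illustration only, never called): build a Spectrum directly from
# numpy's rfft of a noisy signal, apply the low-pass filter defined above, and go
# back to the time domain.
def _low_pass_example(framerate=11025):
    ys = numpy.random.normal(0, 1, framerate)        # one second of white noise
    spectrum = Spectrum(numpy.fft.rfft(ys), framerate)
    spectrum.low_pass(cutoff=1000)                   # attenuate everything above 1 kHz
    return spectrum.make_wave()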
class IntegratedSpectrum(object):
"""Represents the integral of a spectrum."""
def __init__(self, cs, fs):
"""Initializes an integrated spectrum:
cs: sequence of cumulative amplitudes
        fs: sequence of frequencies
"""
self.cs = cs
self.fs = fs
def plot_power(self, low=0, high=None, expo=False, **options):
"""Plots the integrated spectrum.
low: int index to start at
high: int index to end at
"""
cs = self.cs[low:high]
fs = self.fs[low:high]
if expo:
cs = numpy.exp(cs)
thinkplot.plot(fs, cs, **options)
def estimate_slope(self, low=1, high=-12000):
"""Runs linear regression on log cumulative power vs log frequency.
returns: slope, inter, r2, p, stderr
"""
#print self.fs[low:high]
#print self.cs[low:high]
x = numpy.log(self.fs[low:high])
y = numpy.log(self.cs[low:high])
t = scipy.stats.linregress(x,y)
return t
class Dct(_SpectrumParent):
"""Represents the spectrum of a signal using discrete cosine transform."""
def __init__(self, amps, framerate):
self.amps = amps
self.framerate = framerate
n = len(amps)
self.fs = numpy.arange(n) / float(n) * self.max_freq
def __add__(self, other):
"""Adds two DCTs elementwise.
other: DCT
returns: new DCT
"""
if other == 0:
return self
assert self.framerate == other.framerate
amps = self.amps + other.amps
return Dct(amps, self.framerate)
__radd__ = __add__
def make_wave(self):
"""Transforms to the time domain.
returns: Wave
"""
ys = scipy.fftpack.dct(self.amps, type=3) / 2
return Wave(ys, self.framerate)
class Spectrogram(object):
"""Represents the spectrum of a signal."""
def __init__(self, spec_map, seg_length=512, window_func=None):
"""Initialize the spectrogram.
spec_map: map from float time to Spectrum
seg_length: number of samples in each segment
window_func: function that computes the window
"""
self.spec_map = spec_map
self.seg_length = seg_length
self.window_func = window_func
def any_spectrum(self):
"""Returns an arbitrary spectrum from the spectrogram."""
return self.spec_map.itervalues().next()
@property
def time_res(self):
"""Time resolution in seconds."""
spectrum = self.any_spectrum()
return float(self.seg_length) / spectrum.framerate
@property
def freq_res(self):
"""Frequency resolution in Hz."""
return self.any_spectrum().freq_res
def times(self):
"""Sorted sequence of times.
returns: sequence of float times in seconds
"""
ts = sorted(self.spec_map.iterkeys())
return ts
def frequencies(self):
"""Sequence of frequencies.
        returns: sequence of float frequencies in Hz.
"""
fs = self.any_spectrum().fs
return fs
def plot(self, low=0, high=None, **options):
"""Make a pseudocolor plot.
low: index of the lowest frequency component to plot
high: index of the highest frequency component to plot
"""
ts = self.times()
fs = self.frequencies()[low:high]
# make the array
size = len(fs), len(ts)
array = numpy.zeros(size, dtype=numpy.float)
# copy amplitude from each spectrum into a column of the array
for i, t in enumerate(ts):
spectrum = self.spec_map[t]
array[:,i] = spectrum.amps[low:high]
thinkplot.pcolor(ts, fs, array, **options)
def make_wave(self):
"""Inverts the spectrogram and returns a Wave.
returns: Wave
"""
res = []
for t, spectrum in sorted(self.spec_map.iteritems()):
wave = spectrum.make_wave()
n = len(wave)
if self.window_func:
window = 1 / self.window_func(n)
wave.window(window)
i = int(round(t * wave.framerate))
start = i - n / 2
end = start + n
res.append((start, end, wave))
starts, ends, waves = zip(*res)
low = min(starts)
high = max(ends)
ys = numpy.zeros(high-low, numpy.float)
for start, end, wave in res:
ys[start:end] = wave.ys
return Wave(ys, wave.framerate)
class Wave(object):
"""Represents a discrete-time waveform.
Note: the ys attribute is a "wave array" which is a numpy
array of floats.
"""
def __init__(self, ys, framerate, start=0):
"""Initializes the wave.
ys: wave array
framerate: samples per second
"""
self.ys = ys
self.framerate = framerate
self.start = start
def copy(self):
"""Makes a copy.
Returns: new Wave
"""
return copy.deepcopy(self)
def __len__(self):
return len(self.ys)
@property
def ts(self):
"""Times (property).
returns: NumPy array of times
"""
n = len(self.ys)
return numpy.linspace(0, self.duration, n)
@property
def duration(self):
"""Duration (property).
returns: float duration in seconds
"""
return len(self.ys) / float(self.framerate)
def __add__(self, other):
"""Adds two waves elementwise.
other: Wave
returns: new Wave
"""
if other == 0:
return self
assert self.framerate == other.framerate
n1, n2 = len(self), len(other)
if n1 > n2:
ys = self.ys.copy()
ys[:n2] += other.ys
else:
ys = other.ys.copy()
ys[:n1] += self.ys
return Wave(ys, self.framerate)
__radd__ = __add__
def __or__(self, other):
"""Concatenates two waves.
other: Wave
returns: Wave
"""
if self.framerate != other.framerate:
raise ValueError('Wave.__or__: framerates do not agree')
ys = numpy.concatenate((self.ys, other.ys))
return Wave(ys, self.framerate)
def __mul__(self, other):
"""Convolves two waves.
other: Wave
returns: Wave
"""
if self.framerate != other.framerate:
raise ValueError('Wave convolution: framerates do not agree')
ys = numpy.convolve(self.ys, other.ys, mode='full')
ys = ys[:len(self.ys)]
return Wave(ys, self.framerate)
def quantize(self, bound, dtype):
"""Maps the waveform to quanta.
bound: maximum amplitude
dtype: numpy data type or string
returns: quantized signal
"""
return quantize(self.ys, bound, dtype)
def apodize(self, denom=20, duration=0.1):
"""Tapers the amplitude at the beginning and end of the signal.
Tapers either the given duration of time or the given
fraction of the total duration, whichever is less.
denom: float fraction of the segment to taper
duration: float duration of the taper in seconds
"""
self.ys = apodize(self.ys, self.framerate, denom, duration)
def hamming(self):
"""Apply a Hamming window to the wave.
"""
self.ys *= numpy.hamming(len(self.ys))
def window(self, window):
"""Apply a window to the wave.
window: sequence of multipliers, same length as self.ys
"""
self.ys *= window
def scale(self, factor):
"""Multplies the wave by a factor.
factor: scale factor
"""
self.ys *= factor
def shift(self, shift):
"""Shifts the wave left or right by index shift.
shift: integer number of places to shift
"""
if shift < 0:
self.ys = shift_left(self.ys, shift)
if shift > 0:
self.ys = shift_right(self.ys, shift)
def truncate(self, n):
"""Trims this wave to the given length.
"""
self.ys = truncate(self.ys, n)
def normalize(self, amp=1.0):
"""Normalizes the signal to the given amplitude.
amp: float amplitude
"""
self.ys = normalize(self.ys, amp=amp)
def unbias(self):
"""Unbiases the signal.
"""
self.ys = unbias(self.ys)
def segment(self, start=0, duration=None):
"""Extracts a segment.
start: float start time in seconds
duration: float duration in seconds
returns: Wave
"""
i = round(start * self.framerate)
if duration is None:
j = None
else:
j = i + round(duration * self.framerate)
ys = self.ys[i:j]
return Wave(ys, self.framerate)
def make_spectrum(self):
"""Computes the spectrum using FFT.
returns: Spectrum
"""
hs = numpy.fft.rfft(self.ys)
return Spectrum(hs, self.framerate)
def make_dct(self):
amps = scipy.fftpack.dct(self.ys, type=2)
return Dct(amps, self.framerate)
def make_spectrogram(self, seg_length, window_func=numpy.hamming):
"""Computes the spectrogram of the wave.
seg_length: number of samples in each segment
window_func: function used to compute the window
returns: Spectrogram
"""
n = len(self.ys)
window = window_func(seg_length)
start, end, step = 0, seg_length, seg_length / 2
spec_map = {}
while end < n:
ys = self.ys[start:end] * window
hs = numpy.fft.rfft(ys)
t = (start + end) / 2.0 / self.framerate
spec_map[t] = Spectrum(hs, self.framerate)
start += step
end += step
return Spectrogram(spec_map, seg_length, window_func)
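    # Example (hedged sketch, not part of the original class): segments are
    # seg_length samples long and overlap by half, so with the default Hamming
    # window a spectrogram of a one-second 440 Hz tone could be built with:
    #
    #     wave = SinSignal(freq=440).make_wave(duration=1.0, framerate=11025)
    #     spectrogram = wave.make_spectrogram(seg_length=512)
    #     spectrogram.plot(high=80)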
def plot(self, **options):
"""Plots the wave.
"""
thinkplot.plot(self.ts, self.ys, **options)
def corr(self, other):
"""Correlation coefficient two waves.
other: Wave
returns: float coefficient of correlation
"""
corr = numpy.corrcoef(self.ys, other.ys)[0, 1]
return corr
def cov_mat(self, other):
"""Covariance matrix of two waves.
other: Wave
returns: 2x2 covariance matrix
"""
return numpy.cov(self.ys, other.ys)
def cov(self, other):
"""Covariance of two unbiased waves.
other: Wave
returns: float
"""
total = sum(self.ys * other.ys) / len(self.ys)
return total
def cos_cov(self, k):
"""Covariance with a cosine signal.
        k: int index of the cosine component
returns: float covariance
"""
n = len(self.ys)
factor = math.pi * k / n
ys = [math.cos(factor * (i+0.5)) for i in range(n)]
total = 2 * sum(self.ys * ys)
return total
def cos_transform(self):
"""Discrete cosine transform.
returns: list of frequency, cov pairs
"""
n = len(self.ys)
res = []
for k in range(n):
cov = self.cos_cov(k)
res.append((k, cov))
return res
def write(self, filename='sound.wav'):
"""Write a wave file.
filename: string
"""
print('Writing', filename)
wfile = WavFileWriter(filename, self.framerate)
wfile.write(self)
wfile.close()
def play(self, filename='sound.wav'):
"""Plays a wave file.
filename: string
"""
self.write(filename)
play_wave(filename)
def make_audio(self):
"""Makes an IPython Audio object.
"""
audio = Audio(data=self.ys, rate=self.framerate)
return audio
def unbias(ys):
"""Shifts a wave array so it has mean 0.
ys: wave array
returns: wave array
"""
return ys - ys.mean()
def normalize(ys, amp=1.0):
"""Normalizes a wave array so the maximum amplitude is +amp or -amp.
ys: wave array
amp: max amplitude (pos or neg) in result
returns: wave array
"""
high, low = abs(max(ys)), abs(min(ys))
return amp * ys / max(high, low)
def shift_right(ys, shift):
"""Shifts a wave array to the right and zero pads.
ys: wave array
shift: integer shift
returns: wave array
"""
res = numpy.zeros(len(ys) + shift)
res[shift:] = ys
return res
def shift_left(ys, shift):
"""Shifts a wave array to the left.
ys: wave array
shift: integer shift
returns: wave array
"""
return ys[shift:]
def truncate(ys, n):
"""Trims a wave array to the given length.
ys: wave array
n: integer length
returns: wave array
"""
return ys[:n]
def quantize(ys, bound, dtype):
"""Maps the waveform to quanta.
ys: wave array
bound: maximum amplitude
dtype: numpy data type of the result
returns: quantized signal
"""
if max(ys) > 1 or min(ys) < -1:
warnings.warn('Warning: normalizing before quantizing.')
ys = normalize(ys)
zs = (ys * bound).astype(dtype)
return zs
def apodize(ys, framerate, denom=20, duration=0.1):
"""Tapers the amplitude at the beginning and end of the signal.
Tapers either the given duration of time or the given
fraction of the total duration, whichever is less.
ys: wave array
framerate: int frames per second
denom: float fraction of the segment to taper
duration: float duration of the taper in seconds
returns: wave array
"""
# a fixed fraction of the segment
n = len(ys)
k1 = n // denom
# a fixed duration of time
k2 = int(duration * framerate)
k = min(k1, k2)
w1 = numpy.linspace(0, 1, k)
w2 = numpy.ones(n - 2*k)
w3 = numpy.linspace(1, 0, k)
window = numpy.concatenate((w1, w2, w3))
return ys * window
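# Example (hedged sketch, not part of the original module; assumes numpy is
# imported at the top of this file, as the rest of the code does): apodize()
# builds a window that ramps 0 -> 1 over k samples, stays at 1, then ramps
# back down, where k = min(n // denom, int(duration * framerate)).
def _apodize_example():
    framerate = 11025
    ys = numpy.ones(framerate)              # one second of a constant signal
    tapered = apodize(ys, framerate, denom=20, duration=0.1)
    # here k = min(11025 // 20, int(0.1 * 11025)) = 551 taper samples per end
    return tapered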
class Signal(object):
"""Represents a time-varying signal."""
def __add__(self, other):
"""Adds two signals.
other: Signal
returns: Signal
"""
if other == 0:
return self
return SumSignal(self, other)
__radd__ = __add__
@property
def period(self):
"""Period of the signal in seconds (property).
For non-periodic signals, use the default, 0.1 seconds
returns: float seconds
"""
return 0.1
def plot(self, framerate=11025):
"""Plots the signal.
framerate: samples per second
"""
duration = self.period * 3
wave = self.make_wave(duration, start=0, framerate=framerate)
wave.plot()
def make_wave(self, duration=1, start=0, framerate=11025):
"""Makes a Wave object.
duration: float seconds
start: float seconds
framerate: int frames per second
returns: Wave
"""
dt = 1.0 / framerate
ts = numpy.arange(start, duration, dt)
ys = self.evaluate(ts)
return Wave(ys, framerate=framerate, start=start)
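# Example (hedged sketch, not part of the original module): the usual
# Signal -> Wave -> Spectrum workflow built from the classes in this file.
# SinSignal is defined further down; the name is resolved when the function
# is called, so the forward reference is harmless.
def _example_signal_workflow():
    signal = SinSignal(freq=440, amp=0.5)
    wave = signal.make_wave(duration=0.5, framerate=11025)
    spectrum = wave.make_spectrum()
    return spectrum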
def infer_framerate(ts):
"""Given ts, find the framerate.
Assumes that the ts are equally spaced.
ts: sequence of times in seconds
returns: frames per second
"""
dt = ts[1] - ts[0]
framerate = 1.0 / dt
return framerate
class SumSignal(Signal):
"""Represents the sum of signals."""
def __init__(self, *args):
"""Initializes the sum.
args: tuple of signals
"""
self.signals = args
@property
def period(self):
"""Period of the signal in seconds.
        Note: this is not correct; it's mostly a placeholder.
But it is correct for a harmonic sequence where all
component frequencies are multiples of the fundamental.
returns: float seconds
"""
return max(sig.period for sig in self.signals)
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
return sum(sig.evaluate(ts) for sig in self.signals)
class Sinusoid(Signal):
"""Represents a sinusoidal signal."""
def __init__(self, freq=440, amp=1.0, offset=0, func=numpy.sin):
"""Initializes a sinusoidal signal.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
func: function that maps phase to amplitude
"""
self.freq = freq
self.amp = amp
self.offset = offset
self.func = func
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
return 1.0 / self.freq
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
phases = PI2 * self.freq * ts + self.offset
ys = self.amp * self.func(phases)
return ys
def CosSignal(freq=440, amp=1.0, offset=0):
"""Makes a cosine Sinusoid.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
returns: Sinusoid object
"""
return Sinusoid(freq, amp, offset, func=numpy.cos)
def SinSignal(freq=440, amp=1.0, offset=0):
"""Makes a sine Sinusoid.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
returns: Sinusoid object
"""
return Sinusoid(freq, amp, offset, func=numpy.sin)
class ComplexSinusoid(Sinusoid):
"""Represents a complex exponential signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
i = complex(0, 1)
phases = PI2 * self.freq * ts + self.offset
ys = self.amp * numpy.exp(i * phases)
return ys
class SquareSignal(Sinusoid):
"""Represents a square signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = self.amp * numpy.sign(unbias(frac))
return ys
class SawtoothSignal(Sinusoid):
"""Represents a sawtooth signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = normalize(unbias(frac), self.amp)
return ys
class ParabolicSignal(Sinusoid):
"""Represents a parabolic signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = (frac - 0.5)**2
ys = normalize(unbias(ys), self.amp)
return ys
class GlottalSignal(Sinusoid):
"""Represents a periodic signal that resembles a glottal signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = frac**4 * (1-frac)
ys = normalize(unbias(ys), self.amp)
return ys
class TriangleSignal(Sinusoid):
"""Represents a triangle signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = numpy.abs(frac - 0.5)
ys = normalize(unbias(ys), self.amp)
return ys
class Chirp(Signal):
"""Represents a signal with variable frequency."""
def __init__(self, start=440, end=880, amp=1.0):
"""Initializes a linear chirp.
start: float frequency in Hz
end: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
"""
self.start = start
self.end = end
self.amp = amp
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
        raise ValueError('Non-periodic signal.')
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
freqs = numpy.linspace(self.start, self.end, len(ts)-1)
return self._evaluate(ts, freqs)
def _evaluate(self, ts, freqs):
"""Helper function that evaluates the signal.
ts: float array of times
freqs: float array of frequencies during each interval
"""
dts = numpy.diff(ts)
dps = PI2 * freqs * dts
phases = numpy.cumsum(dps)
phases = numpy.insert(phases, 0, 0)
ys = self.amp * numpy.cos(phases)
return ys
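    # Note (clarifying comment, not in the original): the phase is the running
    # integral of the instantaneous angular frequency, approximated here by a
    # cumulative sum of PI2 * freqs * dts; the leading 0 inserted afterwards
    # makes the phase array the same length as ts.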
class ExpoChirp(Chirp):
"""Represents a signal with varying frequency."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
start, end = math.log10(self.start), math.log10(self.end)
freqs = numpy.logspace(start, end, len(ts)-1)
return self._evaluate(ts, freqs)
class SilentSignal(Signal):
"""Represents silence."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
return numpy.zeros(len(ts))
class _Noise(Signal):
"""Represents a noise signal (abstract parent class)."""
def __init__(self, amp=1.0):
"""Initializes a white noise signal.
amp: float amplitude, 1.0 is nominal max
"""
self.amp = amp
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
        raise ValueError('Non-periodic signal.')
class UncorrelatedUniformNoise(_Noise):
"""Represents uncorrelated uniform noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
ys = numpy.random.uniform(-self.amp, self.amp, len(ts))
return ys
class UncorrelatedGaussianNoise(_Noise):
"""Represents uncorrelated gaussian noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
ys = numpy.random.normal(0, self.amp, len(ts))
return ys
class BrownianNoise(_Noise):
"""Represents Brownian noise, aka red noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
Computes Brownian noise by taking the cumulative sum of
a uniform random series.
ts: float array of times
returns: float wave array
"""
dys = numpy.random.uniform(-1, 1, len(ts))
#ys = scipy.integrate.cumtrapz(dys, ts)
ys = numpy.cumsum(dys)
ys = normalize(unbias(ys), self.amp)
return ys
class PinkNoise(_Noise):
"""Represents Brownian noise, aka red noise."""
def __init__(self, amp=1.0, beta=1.0):
"""Initializes a pink noise signal.
amp: float amplitude, 1.0 is nominal max
"""
self.amp = amp
self.beta = beta
def make_wave(self, duration=1, start=0, framerate=11025):
"""Makes a Wave object.
duration: float seconds
start: float seconds
framerate: int frames per second
returns: Wave
"""
signal = UncorrelatedUniformNoise()
wave = signal.make_wave(duration, start, framerate)
spectrum = wave.make_spectrum()
spectrum.pink_filter(beta=self.beta)
wave2 = spectrum.make_wave()
wave2.unbias()
wave2.normalize(self.amp)
return wave2
def rest(duration):
"""Makes a rest of the given duration.
duration: float seconds
returns: Wave
"""
signal = SilentSignal()
wave = signal.make_wave(duration)
return wave
def make_note(midi_num, duration, sig_cons=CosSignal, framerate=11025):
"""Make a MIDI note with the given duration.
midi_num: int MIDI note number
duration: float seconds
sig_cons: Signal constructor function
framerate: int frames per second
returns: Wave
"""
freq = midi_to_freq(midi_num)
signal = sig_cons(freq)
wave = signal.make_wave(duration, framerate=framerate)
wave.apodize()
return wave
def make_chord(midi_nums, duration, sig_cons=CosSignal, framerate=11025):
"""Make a chord with the given duration.
midi_nums: sequence of int MIDI note numbers
duration: float seconds
sig_cons: Signal constructor function
framerate: int frames per second
returns: Wave
"""
freqs = [midi_to_freq(num) for num in midi_nums]
signal = sum(sig_cons(freq) for freq in freqs)
wave = signal.make_wave(duration, framerate=framerate)
wave.apodize()
return wave
def midi_to_freq(midi_num):
"""Converts MIDI note number to frequency.
midi_num: int MIDI note number
returns: float frequency in Hz
"""
x = (midi_num - 69) / 12.0
freq = 440.0 * 2**x
return freq
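# Worked example (hedged, not part of the original module): midi_to_freq()
# computes freq = 440 * 2**((midi_num - 69) / 12), so A4 (MIDI 69) maps to
# 440 Hz, middle C (MIDI 60) to about 261.63 Hz, and adding 12 MIDI numbers
# doubles the frequency.
def _example_midi_to_freq():
    assert midi_to_freq(69) == 440.0
    assert abs(midi_to_freq(60) - 261.63) < 0.01
    assert midi_to_freq(81) == 880.0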
def sin_wave(freq, duration=1, offset=0):
"""Makes a sine wave with the given parameters.
freq: float cycles per second
duration: float seconds
offset: float radians
returns: Wave
"""
signal = SinSignal(freq, offset=offset)
wave = signal.make_wave(duration)
return wave
def cos_wave(freq, duration=1, offset=0):
"""Makes a cosine wave with the given parameters.
freq: float cycles per second
duration: float seconds
offset: float radians
returns: Wave
"""
signal = CosSignal(freq, offset=offset)
wave = signal.make_wave(duration)
return wave
def mag(a):
"""Computes the magnitude of a numpy array.
a: numpy array
returns: float
"""
return numpy.sqrt(numpy.dot(a, a))
def main():
cos_basis = cos_wave(440)
sin_basis = sin_wave(440)
wave = cos_wave(440, offset=math.pi/2)
cos_cov = cos_basis.cov(wave)
sin_cov = sin_basis.cov(wave)
print(cos_cov, sin_cov, mag((cos_cov, sin_cov)))
return
wfile = WavFileWriter()
for sig_cons in [SinSignal, TriangleSignal, SawtoothSignal,
GlottalSignal, ParabolicSignal, SquareSignal]:
print(sig_cons)
sig = sig_cons(440)
wave = sig.make_wave(1)
wave.apodize()
wfile.write(wave)
wfile.close()
return
signal = GlottalSignal(440)
signal.plot()
pyplot.show()
return
wfile = WavFileWriter()
for m in range(60, 0, -1):
wfile.write(make_note(m, 0.25))
wfile.close()
return
wave1 = make_note(69, 1)
wave2 = make_chord([69, 72, 76], 1)
wave = wave1 | wave2
wfile = WavFileWriter()
wfile.write(wave)
wfile.close()
return
sig1 = CosSignal(freq=440)
sig2 = CosSignal(freq=523.25)
sig3 = CosSignal(freq=660)
sig4 = CosSignal(freq=880)
sig5 = CosSignal(freq=987)
sig = sig1 + sig2 + sig3 + sig4
#wave = Wave(sig, duration=0.02)
#wave.plot()
wave = sig.make_wave(duration=1)
#wave.normalize()
wfile = WavFileWriter(wave)
wfile.write()
wfile.close()
if __name__ == '__main__':
main()
| gpl-2.0 |
rs2/pandas | pandas/tests/series/indexing/test_alter_index.py | 2 | 9723 | import numpy as np
import pytest
import pandas as pd
from pandas import Categorical, Series, date_range, isna
import pandas._testing as tm
def test_reindex(datetime_series, string_series):
identity = string_series.reindex(string_series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
assert np.may_share_memory(string_series.index, identity.index)
except AttributeError:
pass
assert identity.index.is_(string_series.index)
assert identity.index.identical(string_series.index)
subIndex = string_series.index[10:20]
subSeries = string_series.reindex(subIndex)
for idx, val in subSeries.items():
assert val == string_series[idx]
subIndex2 = datetime_series.index[10:20]
subTS = datetime_series.reindex(subIndex2)
for idx, val in subTS.items():
assert val == datetime_series[idx]
stuffSeries = datetime_series.reindex(subIndex)
assert np.isnan(stuffSeries).all()
# This is extremely important for the Cython code to not screw up
nonContigIndex = datetime_series.index[::2]
subNonContig = datetime_series.reindex(nonContigIndex)
for idx, val in subNonContig.items():
assert val == datetime_series[idx]
# return a copy the same index here
result = datetime_series.reindex()
assert not (result is datetime_series)
def test_reindex_nan():
ts = Series([2, 3, 5, 7], index=[1, 4, np.nan, 8])
i, j = [np.nan, 1, np.nan, 8, 4, np.nan], [2, 0, 2, 3, 1, 2]
tm.assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype("object")
# reindex coerces index.dtype to float, loc/iloc doesn't
tm.assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_series_add_nat():
rng = date_range("1/1/2000 00:00:00", periods=10, freq="10s")
series = Series(rng)
result = series.reindex(range(15))
assert np.issubdtype(result.dtype, np.dtype("M8[ns]"))
mask = result.isna()
assert mask[-5:].all()
assert not mask[:-5].any()
def test_reindex_with_datetimes():
rng = date_range("1/1/2000", periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_reindex_corner(datetime_series):
# (don't forget to fix this) I think it's fixed
empty = Series(dtype=object)
empty.reindex(datetime_series.index, method="pad") # it works
# corner case: pad empty series
reindexed = empty.reindex(datetime_series.index, method="pad")
# pass non-Index
reindexed = datetime_series.reindex(list(datetime_series.index))
datetime_series.index = datetime_series.index._with_freq(None)
tm.assert_series_equal(datetime_series, reindexed)
# bad fill method
ts = datetime_series[::2]
msg = (
r"Invalid fill method\. Expecting pad \(ffill\), backfill "
r"\(bfill\) or nearest\. Got foo"
)
with pytest.raises(ValueError, match=msg):
ts.reindex(datetime_series.index, method="foo")
def test_reindex_pad():
s = Series(np.arange(10), dtype="int64")
s2 = s[::2]
reindexed = s2.reindex(s.index, method="pad")
reindexed2 = s2.reindex(s.index, method="ffill")
tm.assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
tm.assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", "d", "e"])
new_index = ["a", "g", "c", "f"]
expected = Series([1, 1, 3, 3], index=new_index)
# this changes dtype because the ffill happens after
result = s.reindex(new_index).ffill()
tm.assert_series_equal(result, expected.astype("float64"))
result = s.reindex(new_index).ffill(downcast="infer")
tm.assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method="ffill")
tm.assert_series_equal(result, expected)
# inference of new dtype
s = Series([True, False, False, True], index=list("abcd"))
new_index = "agc"
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
tm.assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=range(0, 5))
result = s.shift(1).fillna(method="bfill")
expected = Series(False, index=range(0, 5))
tm.assert_series_equal(result, expected)
def test_reindex_nearest():
s = Series(np.arange(10, dtype="int64"))
target = [0.1, 0.9, 1.5, 2.0]
result = s.reindex(target, method="nearest")
expected = Series(np.around(target).astype("int64"), target)
tm.assert_series_equal(expected, result)
result = s.reindex(target, method="nearest", tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
tm.assert_series_equal(expected, result)
result = s.reindex(target, method="nearest", tolerance=[0.3, 0.01, 0.4, 3])
expected = Series([0, np.nan, np.nan, 2], target)
tm.assert_series_equal(expected, result)
def test_reindex_backfill():
pass
def test_reindex_int(datetime_series):
ts = datetime_series[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(datetime_series.index)
# if NaNs introduced
assert reindexed_int.dtype == np.float_
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
assert reindexed_int.dtype == np.int_
def test_reindex_bool(datetime_series):
# A series other than float, int, string, or object
ts = datetime_series[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(datetime_series.index)
# if NaNs introduced
assert reindexed_bool.dtype == np.object_
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
assert reindexed_bool.dtype == np.bool_
def test_reindex_bool_pad(datetime_series):
# fail
ts = datetime_series[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(datetime_series.index, method="pad")
assert isna(filled_bool[:5]).all()
def test_reindex_categorical():
index = date_range("20000101", periods=3)
# reindexing to an invalid Categorical
s = Series(["a", "b", "c"], dtype="category")
result = s.reindex(index)
expected = Series(
Categorical(values=[np.nan, np.nan, np.nan], categories=["a", "b", "c"])
)
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=["b", "c"], categories=["a", "b", "c"]))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(values=["c", np.nan], categories=["a", "b", "c"]))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_reindex_fill_value():
# -----------------------------------------------------------
# floats
floats = Series([1.0, 2.0, 3.0])
result = floats.reindex([1, 2, 3])
expected = Series([2.0, 3.0, np.nan], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2.0, 3.0, 0], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
# -----------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2.0, 3.0, np.nan], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
assert issubclass(result.dtype.type, np.integer)
tm.assert_series_equal(result, expected)
# -----------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
tm.assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value="foo")
expected = Series([2, 3, "foo"], index=[1, 2, 3], dtype=object)
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
tm.assert_series_equal(result, expected)
result = bools.reindex([1, 2, 3], fill_value=False)
expected = Series([False, True, False], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_reindex_datetimeindexes_tz_naive_and_aware():
# GH 8306
idx = date_range("20131101", tz="America/Chicago", periods=7)
newidx = date_range("20131103", periods=10, freq="H")
s = Series(range(7), index=idx)
msg = "Cannot compare tz-naive and tz-aware timestamps"
with pytest.raises(TypeError, match=msg):
s.reindex(newidx, method="ffill")
def test_reindex_empty_series_tz_dtype():
# GH 20869
result = Series(dtype="datetime64[ns, UTC]").reindex([0, 1])
expected = Series([pd.NaT] * 2, dtype="datetime64[ns, UTC]")
tm.assert_equal(result, expected)
| bsd-3-clause |
AkademieOlympia/sympy | sympy/external/tests/test_importtools.py | 91 | 1215 | from sympy.external import import_module
# fixes issue that arose in addressing issue 6533
def test_no_stdlib_collections():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections2():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections3():
'''make sure we get the right collections with no catch'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0')
if matplotlib:
assert collections != matplotlib.collections
| bsd-3-clause |
ChristianTremblay/ddcmath | tests/test_temperature.py | 1 | 1644 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 by Christian Tremblay, P.Eng <[email protected]>
#
# Licensed under GPLv3, see file LICENSE in this source tree.
from __future__ import division
from ddcmath.temperature import f2c, c2f, delta_c2f, delta_f2c, oat_percent, mkt
from ddcmath.tolerance import abs_relative_error
from ddcmath.exceptions import InaccuracyException
import pytest
try:
import numpy as np
import pandas as pd
PERM_MKT = True
except ImportError:
PERM_MKT = False
def test_f2c():
"""
Error must be lower than 0.001%
"""
assert abs_relative_error(f2c(32), 0) < 0.001
assert abs_relative_error(f2c(-40), -40) < 0.001
def test_c2f():
"""
Error must be lower than 0.001%
"""
assert abs_relative_error(c2f(0), 32) < 0.001
assert abs_relative_error(c2f(-40), -40) < 0.001
def test_delta_c2f():
assert delta_c2f(1) == 9 / 5
def test_delta_f2c():
assert delta_f2c(1) == 5 / 9
def test_oa_proportion():
assert oat_percent(0, 20, 10) == 0.5
def test_inaccuracy_of_oa_prop():
with pytest.raises(InaccuracyException):
oat_percent(20.000001, 20, 10) == 0.5
def test_mkt():
if not PERM_MKT:
return True
records = [19.8, 20.2, 20.6, 21, 21.3, 21.5]
start = pd.datetime(2018, 9, 11, 0, 0)
end = pd.datetime(2018, 9, 11, 0, 5)
index = pd.date_range(start, end, freq="1Min")
rec = pd.Series(records, index=index)
rec2 = pd.Series(records)
assert abs_relative_error(mkt(rec), 20.752780387897474) < 0.001
assert abs_relative_error(mkt(rec2), 20.752780387897474) < 0.001
| gpl-3.0 |
ericlin-ICT/mysite | src/clouddesktop/core/query.py | 1 | 7926 | #-*- encoding: utf-8 -*-
'''
Created on Feb 14, 2014
@author: ericlin
'''
import logging
import ldap.resiter
from pandas.core.frame import DataFrame
import re
class Query(ldap.resiter.ResultProcessor):
'''
Description:
--------------------------------------------------------
    performs AD (Active Directory) query operations
--------------------------------------------------------
'''
def __init__(self,
server,
user_email,
password,
dc,
port = 389 ):
'''
Description:
--------------------------------------------------------
Constructor
--------------------------------------------------------
Args:
--------------------------------------------------------
dc string domain control string(like 'DC=testlxh,DC=com')
handle SimpleLDAPObject connection handle, default null
---------------------------------------------------------
Exception:
---------------------------------------------------------
throw QueryException
---------------------------------------------------------
'''
self.dc = dc
self.server = server
self.port = port
self.user_email = user_email
self.pwd = password
self.logger = logging.getLogger("ADQueryLogger")
        self.conn = ldap.initialize('ldap://%s:%s' % (self.server, self.port))
        self.conn.simple_bind(user_email, password)
def get_group(self, sAMAccountName=''):
'''
Description:
--------------------------------------------------------
        Use sAMAccountName as the search condition to look up a user's group info
--------------------------------------------------------
Args:
--------------------------------------------------------
sAMAccountName string user name in ccb(like linxianghui.zh)
---------------------------------------------------------
Return:
---------------------------------------------------------
json string with "username:memberof", None if failed
---------------------------------------------------------
'''
# construct search condition
option = ldap.SCOPE_SUBTREE
condition = 'sAMAccountName=%s' % (sAMAccountName)
attrs = ['sAMAccountName','memberOf']
res = None
#try:
# Asynchronous search method
# do search
#s = conn.search_s(self.dc, option, condition, attrs)
msg_id = self.conn.search(self.dc, option, condition, attrs)
for item in self.allresults(msg_id):
print item
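    # Example (hedged sketch, not part of the original class): intended usage,
    # reusing the connection settings from the __main__ block at the bottom of
    # this file.  Note that get_group() currently prints the asynchronous
    # results as they arrive rather than returning them.
    #
    #     q = Query('128.192.214.251', '[email protected]', '123',
    #               dc='dc=lxhtest,dc=com')
    #     q.get_group('lxhadmin')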
def get_dn(self, sAMAccountName=''):
'''
Description:
--------------------------------------------------------
get distinguished name by sAMAccountName
--------------------------------------------------------
Args:
----------------------------------------------------------
sAMAccountName string Object's flpm id
----------------------------------------------------------
Return:
----------------------------------------------------------
distinguished name of the obj, None if failed
----------------------------------------------------------
'''
# construct search condition
option = ldap.SCOPE_SUBTREE
condition = 'sAMAccountName=%s' % (sAMAccountName)
attrs = ['distinguishedName']
# do search
try:
# search AD
res = self.handle.search_s(self.dc, option, condition, attrs)
        except ldap.LDAPError, e:
res = None
self.logger.debug(e)
self.logger.error("查询 %s 失败" % sAMAccountName)
except Exception, e:
res = None
self.logger.debug(e)
self.logger.error("查询 %s 失败" % sAMAccountName)
finally:
return res
class InnerLDAPObject(ldap.ldapobject.LDAPObject,ldap.resiter.ResultProcessor):
pass
class AsyncQuery(object):
def __init__(self,
server,
user_email,
password,
dc,
port = 389):
self.dc = dc
self.server = server
self.port = port
self.user_email = user_email
self.pwd = password
self.logger = logging.getLogger("ADQueryLogger")
self.uri = 'ldap://' + self.server + ':' + str(self.port)
def __connect(self):
self.ldap_obj = InnerLDAPObject(self.uri)
self.ldap_obj.protocol_version = ldap.VERSION3
self.ldap_obj.set_option(ldap.OPT_REFERRALS,0)
self.ldap_obj.simple_bind_s(self.user_email, self.pwd)
def __disconnect(self):
self.ldap_obj.unbind_ext_s()
#print 'disconnect'
def async_get_dn(self, sAMAccountName):
self.__connect()
option = ldap.SCOPE_SUBTREE
condition = 'sAMAccountName=%s' % (sAMAccountName)
attrs = ['distinguishedName']
try:
msg_id = self.ldap_obj.search(self.dc, option, condition, attrs)
s = ''
res = None
for i in self.ldap_obj.allresults(msg_id, 10):
s = i[1]
break
#print 's:', s
# not find user, return None
if str(s).find('distinguishedName') == -1:
return None
df_tmp = DataFrame(s, columns=['user', 'attrs'])
# get first record attrs dict
attrs = df_tmp['attrs'][0]
#print 'attr:', attrs
# find memberOf attr
if 'distinguishedName' in attrs.keys():
dn = attrs['distinguishedName']
#print str(dn)
res = dn[0]
print 'res', res
print 'typs', type(res)
except Exception, e:
print e
finally:
self.__disconnect()
return res
def async_get_group(self, sAMAccountName): # Asynchronous search method
self.__connect()
option = ldap.SCOPE_SUBTREE
condition = 'sAMAccountName=%s' % (sAMAccountName)
attrs = ['sAMAccountName',
'memberOf',]
res = None
try:
msg_id = self.ldap_obj.search(self.dc, option, condition, attrs)
s = ''
for i in self.ldap_obj.allresults(msg_id, 10):
s = i[1]
break
# not find user, return None
if str(s).find('sAMAccountName') == -1:
return None
if str(s).find('memberOf') == -1:
res = ''
#print s
df_tmp = DataFrame(s, columns=['user', 'attrs'])
# get first record attrs dict
attrs = df_tmp['attrs'][0]
# find memberOf attr
if 'memberOf' in attrs.keys():
memberof = attrs['memberOf']
res = (sAMAccountName, memberof)
return res
except Exception,e:
print e
finally:
self.__disconnect()
if __name__ == '__main__':
server = '128.192.214.251'
port = 389
user_email = '[email protected]'
pwd = '123'
dc = 'dc=lxhtest,dc=com'
#q = query(server,user_email,pwd,dc = dc, port=ldap.PORT )
sAMAccountName= 'lxhadmin'
#res = q.find_gourp_by_aAMAccountName(sAMAccountName)
#print res
#conn = ldap.initialize('ldap://128.192.214.251:389')
#conn.simple_bind(user_email, pwd)
# do search
option = ldap.SCOPE_SUBTREE
condition = 'sAMAccountName=%s' % (sAMAccountName)
attrs = ['sAMAccountName','memberOf']
#s = conn.search_s(dc, option, condition, attrs)
#print s
aQuery = AsyncQuery(server,user_email,pwd,dc = dc, port=ldap.PORT )
print aQuery.async_get_group(sAMAccountName)
#print aQuery.async_get_dn(sAMAccountName)
'''
# Asynchronous search method
msg_id = conn.search(dc, option, condition)
ldap.ldapobject.LDAPObject
for item in conn.allresults(msg_id):
print item
'''
#for res_type,res_data,res_msgid,res_controls in conn.allresults(msg_id):
| apache-2.0 |
jmargeta/scikit-learn | examples/plot_multilabel.py | 4 | 4168 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.pls import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
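    # the decision boundary satisfies w[0]*x + w[1]*y + intercept = 0, so
    # solving for y gives slope a = -w[0]/w[1] and offset -intercept/w[1]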
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
pl.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
# Convert list of tuples to a class indicator matrix first
Y_indicator = LabelBinarizer().fit(Y).transform(Y)
X = CCA(n_components=2).fit(X, Y_indicator).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
pl.subplot(2, 2, subplot)
pl.title(title)
zero_class = np.where([0 in y for y in Y])
one_class = np.where([1 in y for y in Y])
pl.scatter(X[:, 0], X[:, 1], s=40, c='gray')
pl.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
pl.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
pl.axis('tight')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
pl.xticks(())
pl.yticks(())
if subplot == 2:
pl.xlim(min_x - 5, max_x)
pl.xlabel('First principal component')
pl.ylabel('Second principal component')
pl.legend(loc="upper left")
pl.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
pl.subplots_adjust(.04, .02, .97, .94, .09, .2)
pl.show()
| bsd-3-clause |
nomadcube/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
cls_names : list of estimator class names that generated the runtimes
pred_type : 'bulk' or 'atomic'
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
n_train : nber of training instances (int)
n_test : nber of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
wholmgren/pvlib-python | pvlib/solarposition.py | 1 | 48876 | """
Calculate the solar position using a variety of methods/packages.
"""
# Contributors:
# Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Will Holmgren (@wholmgren), University of Arizona, 2014
# Tony Lorenzo (@alorenzo175), University of Arizona, 2015
# Cliff hansen (@cwhanse), Sandia National Laboratories, 2018
from __future__ import division
import os
import datetime as dt
try:
from importlib import reload
except ImportError:
try:
from imp import reload
except ImportError:
pass
import numpy as np
import pandas as pd
import warnings
from pvlib import atmosphere
from pvlib.tools import datetime_to_djd, djd_to_datetime
from pvlib._deprecation import deprecated
NS_PER_HR = 1.e9 * 3600. # nanoseconds per hour
def get_solarposition(time, latitude, longitude,
altitude=None, pressure=None,
method='nrel_numpy',
temperature=12, **kwargs):
"""
A convenience wrapper for the solar position calculators.
Parameters
----------
time : pandas.DatetimeIndex
latitude : float
longitude : float
altitude : None or float, default None
If None, computed from pressure. Assumed to be 0 m
if pressure is also None.
pressure : None or float, default None
If None, computed from altitude. Assumed to be 101325 Pa
if altitude is also None.
method : string, default 'nrel_numpy'
'nrel_numpy' uses an implementation of the NREL SPA algorithm
described in [1] (default, recommended): :py:func:`spa_python`
'nrel_numba' uses an implementation of the NREL SPA algorithm
described in [1], but also compiles the code first:
:py:func:`spa_python`
'pyephem' uses the PyEphem package: :py:func:`pyephem`
'ephemeris' uses the pvlib ephemeris code: :py:func:`ephemeris`
'nrel_c' uses the NREL SPA C code [3]: :py:func:`spa_c`
temperature : float, default 12
Degrees C.
Other keywords are passed to the underlying solar position function.
References
----------
[1] I. Reda and A. Andreas, Solar position algorithm for solar radiation
applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
[2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
[3] NREL SPA code: http://rredc.nrel.gov/solar/codesandalgorithms/spa/
"""
if altitude is None and pressure is None:
altitude = 0.
pressure = 101325.
elif altitude is None:
altitude = atmosphere.pres2alt(pressure)
elif pressure is None:
pressure = atmosphere.alt2pres(altitude)
method = method.lower()
if isinstance(time, dt.datetime):
time = pd.DatetimeIndex([time, ])
if method == 'nrel_c':
ephem_df = spa_c(time, latitude, longitude, pressure, temperature,
**kwargs)
elif method == 'nrel_numba':
ephem_df = spa_python(time, latitude, longitude, altitude,
pressure, temperature,
how='numba', **kwargs)
elif method == 'nrel_numpy':
ephem_df = spa_python(time, latitude, longitude, altitude,
pressure, temperature,
how='numpy', **kwargs)
elif method == 'pyephem':
ephem_df = pyephem(time, latitude, longitude,
altitude=altitude,
pressure=pressure,
temperature=temperature, **kwargs)
elif method == 'ephemeris':
ephem_df = ephemeris(time, latitude, longitude, pressure, temperature,
**kwargs)
else:
raise ValueError('Invalid solar position method')
return ephem_df
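# Example (illustrative sketch, not part of pvlib): a minimal call to
# get_solarposition with the default NREL SPA implementation.  The location
# and times below are arbitrary placeholders.
#
#     times = pd.date_range('2019-06-01 06:00', '2019-06-01 18:00',
#                           freq='1H', tz='Etc/GMT+7')
#     solpos = get_solarposition(times, latitude=32.2, longitude=-110.9)
#     solpos[['apparent_zenith', 'apparent_elevation', 'azimuth']].head()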
def spa_c(time, latitude, longitude, pressure=101325, altitude=0,
temperature=12, delta_t=67.0,
raw_spa_output=False):
"""
Calculate the solar position using the C implementation of the NREL
SPA code.
The source files for this code are located in './spa_c_files/', along with
a README file which describes how the C code is wrapped in Python.
    Due to license restrictions, the C code must be downloaded separately
    and used in accordance with its license.
This function is slower and no more accurate than :py:func:`spa_python`.
Parameters
----------
time : pandas.DatetimeIndex
Localized or UTC.
latitude : float
longitude : float
pressure : float, default 101325
Pressure in Pascals
altitude : float, default 0
Elevation above sea level.
temperature : float, default 12
Temperature in C
delta_t : float, default 67.0
Difference between terrestrial time and UT1.
USNO has previous values and predictions.
raw_spa_output : bool, default False
If true, returns the raw SPA output.
Returns
-------
DataFrame
The DataFrame will have the following columns:
elevation,
azimuth,
zenith,
apparent_elevation,
apparent_zenith.
References
----------
NREL SPA reference: http://rredc.nrel.gov/solar/codesandalgorithms/spa/
NREL SPA C files: https://midcdmz.nrel.gov/spa/
Note: The ``timezone`` field in the SPA C files is replaced with
``time_zone`` to avoid a nameclash with the function ``__timezone`` that is
redefined by Python>=3.5. This issue is
`Python bug 24643 <https://bugs.python.org/issue24643>`_.
USNO delta T:
http://www.usno.navy.mil/USNO/earth-orientation/eo-products/long-term
See also
--------
pyephem, spa_python, ephemeris
"""
# Added by Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Edited by Will Holmgren (@wholmgren), University of Arizona, 2014
# Edited by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
try:
from pvlib.spa_c_files.spa_py import spa_calc
except ImportError:
raise ImportError('Could not import built-in SPA calculator. ' +
'You may need to recompile the SPA code.')
# if localized, convert to UTC. otherwise, assume UTC.
try:
time_utc = time.tz_convert('UTC')
except TypeError:
time_utc = time
spa_out = []
for date in time_utc:
spa_out.append(spa_calc(year=date.year,
month=date.month,
day=date.day,
hour=date.hour,
minute=date.minute,
second=date.second,
time_zone=0, # date uses utc time
latitude=latitude,
longitude=longitude,
elevation=altitude,
pressure=pressure / 100,
temperature=temperature,
delta_t=delta_t
))
spa_df = pd.DataFrame(spa_out, index=time)
if raw_spa_output:
# rename "time_zone" from raw output from spa_c_files.spa_py.spa_calc()
# to "timezone" to match the API of pvlib.solarposition.spa_c()
return spa_df.rename(columns={'time_zone': 'timezone'})
else:
dfout = pd.DataFrame({'azimuth': spa_df['azimuth'],
'apparent_zenith': spa_df['zenith'],
'apparent_elevation': spa_df['e'],
'elevation': spa_df['e0'],
'zenith': 90 - spa_df['e0']})
return dfout
def _spa_python_import(how):
"""Compile spa.py appropriately"""
from pvlib import spa
# check to see if the spa module was compiled with numba
using_numba = spa.USE_NUMBA
if how == 'numpy' and using_numba:
# the spa module was compiled to numba code, so we need to
# reload the module without compiling
# the PVLIB_USE_NUMBA env variable is used to tell the module
# to not compile with numba
warnings.warn('Reloading spa to use numpy')
os.environ['PVLIB_USE_NUMBA'] = '0'
spa = reload(spa)
del os.environ['PVLIB_USE_NUMBA']
elif how == 'numba' and not using_numba:
# The spa module was not compiled to numba code, so set
# PVLIB_USE_NUMBA so it does compile to numba on reload.
warnings.warn('Reloading spa to use numba')
os.environ['PVLIB_USE_NUMBA'] = '1'
spa = reload(spa)
del os.environ['PVLIB_USE_NUMBA']
elif how != 'numba' and how != 'numpy':
raise ValueError("how must be either 'numba' or 'numpy'")
return spa
def spa_python(time, latitude, longitude,
altitude=0, pressure=101325, temperature=12, delta_t=67.0,
atmos_refract=None, how='numpy', numthreads=4, **kwargs):
"""
Calculate the solar position using a python implementation of the
NREL SPA algorithm described in [1].
If numba is installed, the functions can be compiled to
machine code and the function can be multithreaded.
Without numba, the function evaluates via numpy with
a slight performance hit.
Parameters
----------
time : pandas.DatetimeIndex
Localized or UTC.
latitude : float
longitude : float
altitude : float, default 0
pressure : int or float, optional, default 101325
avg. yearly air pressure in Pascals.
temperature : int or float, optional, default 12
avg. yearly air temperature in degrees C.
delta_t : float, optional, default 67.0
If delta_t is None, uses spa.calculate_deltat
using time.year and time.month from pandas.DatetimeIndex.
        For most simulations specifying delta_t is sufficient.
Difference between terrestrial time and UT1.
*Note: delta_t = None will break code using nrel_numba,
this will be fixed in a future version.*
The USNO has historical and forecasted delta_t [3].
    atmos_refract : None or float, optional, default None
The approximate atmospheric refraction (in degrees)
at sunrise and sunset.
how : str, optional, default 'numpy'
Options are 'numpy' or 'numba'. If numba >= 0.17.0
is installed, how='numba' will compile the spa functions
to machine code and run them multithreaded.
numthreads : int, optional, default 4
Number of threads to use if how == 'numba'.
Returns
-------
DataFrame
The DataFrame will have the following columns:
apparent_zenith (degrees),
zenith (degrees),
apparent_elevation (degrees),
elevation (degrees),
azimuth (degrees),
equation_of_time (minutes).
References
----------
[1] I. Reda and A. Andreas, Solar position algorithm for solar
radiation applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
[2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
[3] USNO delta T:
http://www.usno.navy.mil/USNO/earth-orientation/eo-products/long-term
See also
--------
pyephem, spa_c, ephemeris
"""
# Added by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
lat = latitude
lon = longitude
elev = altitude
pressure = pressure / 100 # pressure must be in millibars for calculation
atmos_refract = atmos_refract or 0.5667
if not isinstance(time, pd.DatetimeIndex):
try:
time = pd.DatetimeIndex(time)
except (TypeError, ValueError):
time = pd.DatetimeIndex([time, ])
unixtime = np.array(time.astype(np.int64)/10**9)
spa = _spa_python_import(how)
delta_t = delta_t or spa.calculate_deltat(time.year, time.month)
app_zenith, zenith, app_elevation, elevation, azimuth, eot = \
spa.solar_position(unixtime, lat, lon, elev, pressure, temperature,
delta_t, atmos_refract, numthreads)
result = pd.DataFrame({'apparent_zenith': app_zenith, 'zenith': zenith,
'apparent_elevation': app_elevation,
'elevation': elevation, 'azimuth': azimuth,
'equation_of_time': eot},
index=time)
return result
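# spa_python usage sketch (hypothetical location; pd and np are module-level
# imports here):
#
#   times = pd.date_range('2019-06-01', periods=24, freq='H', tz='UTC')
#   solpos = spa_python(times, latitude=40.0, longitude=-105.0)
#   solpos[['apparent_zenith', 'azimuth', 'equation_of_time']].head()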
def sun_rise_set_transit_spa(times, latitude, longitude, how='numpy',
delta_t=67.0, numthreads=4):
"""
Calculate the sunrise, sunset, and sun transit times using the
NREL SPA algorithm described in [1].
If numba is installed, the functions can be compiled to
machine code and the function can be multithreaded.
Without numba, the function evaluates via numpy with
a slight performance hit.
Parameters
----------
times : pandas.DatetimeIndex
Must be localized to the timezone for ``latitude`` and ``longitude``.
latitude : float
Latitude in degrees, positive north of equator, negative to south
longitude : float
Longitude in degrees, positive east of prime meridian, negative to west
delta_t : float, optional
If delta_t is None, uses spa.calculate_deltat
using times.year and times.month from pandas.DatetimeIndex.
        For most simulations specifying delta_t is sufficient.
Difference between terrestrial time and UT1.
delta_t = None will break code using nrel_numba,
this will be fixed in a future version.
By default, use USNO historical data and predictions
how : str, optional, default 'numpy'
Options are 'numpy' or 'numba'. If numba >= 0.17.0
is installed, how='numba' will compile the spa functions
to machine code and run them multithreaded.
numthreads : int, optional, default 4
Number of threads to use if how == 'numba'.
Returns
-------
pandas.DataFrame
index is the same as input `times` argument
columns are 'sunrise', 'sunset', and 'transit'
References
----------
[1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar
radiation applications. Technical report: NREL/TP-560- 34302. Golden,
USA, http://www.nrel.gov.
"""
# Added by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
lat = latitude
lon = longitude
# times must be localized
if times.tz:
tzinfo = times.tz
else:
raise ValueError('times must be localized')
# must convert to midnight UTC on day of interest
utcday = pd.DatetimeIndex(times.date).tz_localize('UTC')
unixtime = np.array(utcday.astype(np.int64)/10**9)
spa = _spa_python_import(how)
delta_t = delta_t or spa.calculate_deltat(times.year, times.month)
transit, sunrise, sunset = spa.transit_sunrise_sunset(
unixtime, lat, lon, delta_t, numthreads)
    # arrays are in seconds since epoch format, need to convert to timestamps
transit = pd.to_datetime(transit*1e9, unit='ns', utc=True).tz_convert(
tzinfo).tolist()
sunrise = pd.to_datetime(sunrise*1e9, unit='ns', utc=True).tz_convert(
tzinfo).tolist()
sunset = pd.to_datetime(sunset*1e9, unit='ns', utc=True).tz_convert(
tzinfo).tolist()
return pd.DataFrame(index=times, data={'sunrise': sunrise,
'sunset': sunset,
'transit': transit})
get_sun_rise_set_transit = deprecated('0.6.1',
alternative='sun_rise_set_transit_spa',
name='get_sun_rise_set_transit',
removal='0.7')(sun_rise_set_transit_spa)
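# sun_rise_set_transit_spa requires tz-localized input; a hedged sketch with
# hypothetical values:
#
#   days = pd.DatetimeIndex(['2019-06-01', '2019-06-02'], tz='Etc/GMT+7')
#   srst = sun_rise_set_transit_spa(days, latitude=32.2, longitude=-110.9)
#   srst[['sunrise', 'transit', 'sunset']]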
def _ephem_convert_to_seconds_and_microseconds(date):
# utility from unreleased PyEphem 3.6.7.1
"""Converts a PyEphem date into seconds"""
microseconds = int(round(24 * 60 * 60 * 1000000 * date))
seconds, microseconds = divmod(microseconds, 1000000)
seconds -= 2209032000 # difference between epoch 1900 and epoch 1970
return seconds, microseconds
def _ephem_to_timezone(date, tzinfo):
    # utility from unreleased PyEphem 3.6.7.1
    """Convert a PyEphem Date into a timezone-aware python datetime"""
seconds, microseconds = _ephem_convert_to_seconds_and_microseconds(date)
date = dt.datetime.fromtimestamp(seconds, tzinfo)
date = date.replace(microsecond=microseconds)
return date
def _ephem_setup(latitude, longitude, altitude, pressure, temperature,
horizon):
import ephem
# initialize a PyEphem observer
obs = ephem.Observer()
obs.lat = str(latitude)
obs.lon = str(longitude)
obs.elevation = altitude
obs.pressure = pressure / 100. # convert to mBar
obs.temp = temperature
obs.horizon = horizon
# the PyEphem sun
sun = ephem.Sun()
return obs, sun
def sun_rise_set_transit_ephem(times, latitude, longitude,
next_or_previous='next',
altitude=0,
pressure=101325,
temperature=12, horizon='0:00'):
"""
Calculate the next sunrise and sunset times using the PyEphem package.
Parameters
----------
time : pandas.DatetimeIndex
Must be localized
latitude : float
Latitude in degrees, positive north of equator, negative to south
longitude : float
Longitude in degrees, positive east of prime meridian, negative to west
next_or_previous : str
'next' or 'previous' sunrise and sunset relative to time
altitude : float, default 0
distance above sea level in meters.
pressure : int or float, optional, default 101325
air pressure in Pascals.
temperature : int or float, optional, default 12
air temperature in degrees C.
horizon : string, format +/-X:YY
arc degrees:arc minutes from geometrical horizon for sunrise and
sunset, e.g., horizon='+0:00' to use sun center crossing the
geometrical horizon to define sunrise and sunset,
horizon='-0:34' for when the sun's upper edge crosses the
geometrical horizon
Returns
-------
pandas.DataFrame
index is the same as input `time` argument
columns are 'sunrise', 'sunset', and 'transit'
See also
--------
pyephem
"""
try:
import ephem
except ImportError:
raise ImportError('PyEphem must be installed')
# times must be localized
if times.tz:
tzinfo = times.tz
else:
raise ValueError('times must be localized')
obs, sun = _ephem_setup(latitude, longitude, altitude,
pressure, temperature, horizon)
# create lists of sunrise and sunset time localized to time.tz
if next_or_previous.lower() == 'next':
rising = obs.next_rising
setting = obs.next_setting
transit = obs.next_transit
elif next_or_previous.lower() == 'previous':
rising = obs.previous_rising
setting = obs.previous_setting
transit = obs.previous_transit
else:
raise ValueError("next_or_previous must be either 'next' or" +
" 'previous'")
sunrise = []
sunset = []
trans = []
for thetime in times:
thetime = thetime.to_pydatetime()
# pyephem drops timezone when converting to its internal datetime
# format, so handle timezone explicitly here
obs.date = ephem.Date(thetime - thetime.utcoffset())
sunrise.append(_ephem_to_timezone(rising(sun), tzinfo))
sunset.append(_ephem_to_timezone(setting(sun), tzinfo))
trans.append(_ephem_to_timezone(transit(sun), tzinfo))
return pd.DataFrame(index=times, data={'sunrise': sunrise,
'sunset': sunset,
'transit': trans})
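# sun_rise_set_transit_ephem sketch (needs the optional ephem package; the
# location and times are hypothetical):
#
#   times = pd.DatetimeIndex(['2019-06-01 12:00'], tz='Etc/GMT+7')
#   srst = sun_rise_set_transit_ephem(times, latitude=32.2, longitude=-110.9,
#                                     next_or_previous='next')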
def pyephem(time, latitude, longitude, altitude=0, pressure=101325,
temperature=12, horizon='+0:00'):
"""
Calculate the solar position using the PyEphem package.
Parameters
----------
time : pandas.DatetimeIndex
Localized or UTC.
latitude : float
positive is north of 0
longitude : float
positive is east of 0
altitude : float, default 0
distance above sea level in meters.
pressure : int or float, optional, default 101325
air pressure in Pascals.
temperature : int or float, optional, default 12
air temperature in degrees C.
horizon : string, optional, default '+0:00'
arc degrees:arc minutes from geometrical horizon for sunrise and
sunset, e.g., horizon='+0:00' to use sun center crossing the
geometrical horizon to define sunrise and sunset,
horizon='-0:34' for when the sun's upper edge crosses the
geometrical horizon
Returns
-------
pandas.DataFrame
index is the same as input `time` argument
The DataFrame will have the following columns:
apparent_elevation, elevation,
apparent_azimuth, azimuth,
apparent_zenith, zenith.
See also
--------
spa_python, spa_c, ephemeris
"""
# Written by Will Holmgren (@wholmgren), University of Arizona, 2014
try:
import ephem
except ImportError:
raise ImportError('PyEphem must be installed')
# if localized, convert to UTC. otherwise, assume UTC.
try:
time_utc = time.tz_convert('UTC')
except TypeError:
time_utc = time
sun_coords = pd.DataFrame(index=time)
obs, sun = _ephem_setup(latitude, longitude, altitude,
pressure, temperature, horizon)
# make and fill lists of the sun's altitude and azimuth
# this is the pressure and temperature corrected apparent alt/az.
alts = []
azis = []
for thetime in time_utc:
obs.date = ephem.Date(thetime)
sun.compute(obs)
alts.append(sun.alt)
azis.append(sun.az)
sun_coords['apparent_elevation'] = alts
sun_coords['apparent_azimuth'] = azis
# redo it for p=0 to get no atmosphere alt/az
obs.pressure = 0
alts = []
azis = []
for thetime in time_utc:
obs.date = ephem.Date(thetime)
sun.compute(obs)
alts.append(sun.alt)
azis.append(sun.az)
sun_coords['elevation'] = alts
sun_coords['azimuth'] = azis
# convert to degrees. add zenith
sun_coords = np.rad2deg(sun_coords)
sun_coords['apparent_zenith'] = 90 - sun_coords['apparent_elevation']
sun_coords['zenith'] = 90 - sun_coords['elevation']
return sun_coords
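# pyephem() solar position sketch (needs ephem; inputs hypothetical):
#
#   times = pd.date_range('2019-06-01 06:00', periods=6, freq='H', tz='UTC')
#   pos = pyephem(times, latitude=40.0, longitude=-105.0)
#   pos[['apparent_elevation', 'azimuth']]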
def ephemeris(time, latitude, longitude, pressure=101325, temperature=12):
"""
Python-native solar position calculator.
The accuracy of this code is not guaranteed.
Consider using the built-in spa_c code or the PyEphem library.
Parameters
----------
time : pandas.DatetimeIndex
latitude : float
longitude : float
pressure : float or Series, default 101325
Ambient pressure (Pascals)
temperature : float or Series, default 12
Ambient temperature (C)
Returns
-------
DataFrame with the following columns:
* apparent_elevation : apparent sun elevation accounting for
atmospheric refraction.
* elevation : actual elevation (not accounting for refraction)
of the sun in decimal degrees, 0 = on horizon.
The complement of the zenith angle.
        * azimuth : Azimuth of the sun in decimal degrees East of North.
* apparent_zenith : apparent sun zenith accounting for atmospheric
refraction.
* zenith : Solar zenith angle
* solar_time : Solar time in decimal hours (solar noon is 12.00).
References
-----------
Grover Hughes' class and related class materials on Engineering
Astronomy at Sandia National Laboratories, 1985.
See also
--------
pyephem, spa_c, spa_python
"""
# Added by Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Edited by Will Holmgren (@wholmgren), University of Arizona, 2014
# Most comments in this function are from PVLIB_MATLAB or from
# pvlib-python's attempt to understand and fix problems with the
# algorithm. The comments are *not* based on the reference material.
# This helps a little bit:
# http://www.cv.nrao.edu/~rfisher/Ephemerides/times.html
# the inversion of longitude is due to the fact that this code was
# originally written for the convention that positive longitude were for
# locations west of the prime meridian. However, the correct convention (as
# of 2009) is to use negative longitudes for locations west of the prime
# meridian. Therefore, the user should input longitude values under the
# correct convention (e.g. Albuquerque is at -106 longitude), but it needs
# to be inverted for use in the code.
Latitude = latitude
Longitude = -1 * longitude
Abber = 20 / 3600.
LatR = np.radians(Latitude)
# the SPA algorithm needs time to be expressed in terms of
# decimal UTC hours of the day of the year.
# if localized, convert to UTC. otherwise, assume UTC.
try:
time_utc = time.tz_convert('UTC')
except TypeError:
time_utc = time
# strip out the day of the year and calculate the decimal hour
DayOfYear = time_utc.dayofyear
DecHours = (time_utc.hour + time_utc.minute/60. + time_utc.second/3600. +
time_utc.microsecond/3600.e6)
# np.array needed for pandas > 0.20
UnivDate = np.array(DayOfYear)
UnivHr = np.array(DecHours)
Yr = np.array(time_utc.year) - 1900
YrBegin = 365 * Yr + np.floor((Yr - 1) / 4.) - 0.5
Ezero = YrBegin + UnivDate
T = Ezero / 36525.
# Calculate Greenwich Mean Sidereal Time (GMST)
GMST0 = 6 / 24. + 38 / 1440. + (
45.836 + 8640184.542 * T + 0.0929 * T ** 2) / 86400.
GMST0 = 360 * (GMST0 - np.floor(GMST0))
GMSTi = np.mod(GMST0 + 360 * (1.0027379093 * UnivHr / 24.), 360)
# Local apparent sidereal time
LocAST = np.mod((360 + GMSTi - Longitude), 360)
EpochDate = Ezero + UnivHr / 24.
T1 = EpochDate / 36525.
ObliquityR = np.radians(
23.452294 - 0.0130125 * T1 - 1.64e-06 * T1 ** 2 + 5.03e-07 * T1 ** 3)
MlPerigee = 281.22083 + 4.70684e-05 * EpochDate + 0.000453 * T1 ** 2 + (
3e-06 * T1 ** 3)
MeanAnom = np.mod((358.47583 + 0.985600267 * EpochDate - 0.00015 *
T1 ** 2 - 3e-06 * T1 ** 3), 360)
Eccen = 0.01675104 - 4.18e-05 * T1 - 1.26e-07 * T1 ** 2
EccenAnom = MeanAnom
E = 0
while np.max(abs(EccenAnom - E)) > 0.0001:
E = EccenAnom
EccenAnom = MeanAnom + np.degrees(Eccen)*np.sin(np.radians(E))
TrueAnom = (
2 * np.mod(np.degrees(np.arctan2(((1 + Eccen) / (1 - Eccen)) ** 0.5 *
np.tan(np.radians(EccenAnom) / 2.), 1)), 360))
EcLon = np.mod(MlPerigee + TrueAnom, 360) - Abber
EcLonR = np.radians(EcLon)
DecR = np.arcsin(np.sin(ObliquityR)*np.sin(EcLonR))
RtAscen = np.degrees(np.arctan2(np.cos(ObliquityR)*np.sin(EcLonR),
np.cos(EcLonR)))
HrAngle = LocAST - RtAscen
HrAngleR = np.radians(HrAngle)
HrAngle = HrAngle - (360 * ((abs(HrAngle) > 180)))
SunAz = np.degrees(np.arctan2(-np.sin(HrAngleR),
np.cos(LatR)*np.tan(DecR) -
np.sin(LatR)*np.cos(HrAngleR)))
SunAz[SunAz < 0] += 360
SunEl = np.degrees(np.arcsin(
np.cos(LatR) * np.cos(DecR) * np.cos(HrAngleR) +
np.sin(LatR) * np.sin(DecR)))
SolarTime = (180 + HrAngle) / 15.
# Calculate refraction correction
Elevation = SunEl
TanEl = pd.Series(np.tan(np.radians(Elevation)), index=time_utc)
Refract = pd.Series(0, index=time_utc)
Refract[(Elevation > 5) & (Elevation <= 85)] = (
58.1/TanEl - 0.07/(TanEl**3) + 8.6e-05/(TanEl**5))
Refract[(Elevation > -0.575) & (Elevation <= 5)] = (
Elevation *
(-518.2 + Elevation*(103.4 + Elevation*(-12.79 + Elevation*0.711))) +
1735)
Refract[(Elevation > -1) & (Elevation <= -0.575)] = -20.774 / TanEl
Refract *= (283/(273. + temperature)) * (pressure/101325.) / 3600.
ApparentSunEl = SunEl + Refract
# make output DataFrame
DFOut = pd.DataFrame(index=time_utc)
DFOut['apparent_elevation'] = ApparentSunEl
DFOut['elevation'] = SunEl
DFOut['azimuth'] = SunAz
DFOut['apparent_zenith'] = 90 - ApparentSunEl
DFOut['zenith'] = 90 - SunEl
DFOut['solar_time'] = SolarTime
DFOut.index = time
return DFOut
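# ephemeris() needs no optional dependencies; a quick sketch with hypothetical
# inputs:
#
#   times = pd.date_range('2019-06-01', periods=24, freq='H', tz='UTC')
#   pos = ephemeris(times, latitude=40.0, longitude=-105.0)
#   pos[['elevation', 'azimuth', 'solar_time']].head()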
def calc_time(lower_bound, upper_bound, latitude, longitude, attribute, value,
altitude=0, pressure=101325, temperature=12, horizon='+0:00',
xtol=1.0e-12):
"""
Calculate the time between lower_bound and upper_bound
where the attribute is equal to value. Uses PyEphem for
solar position calculations.
Parameters
----------
lower_bound : datetime.datetime
upper_bound : datetime.datetime
latitude : float
longitude : float
attribute : str
The attribute of a pyephem.Sun object that
you want to solve for. Likely options are 'alt'
and 'az' (which must be given in radians).
value : int or float
The value of the attribute to solve for
altitude : float, default 0
Distance above sea level.
pressure : int or float, optional, default 101325
Air pressure in Pascals. Set to 0 for no
atmospheric correction.
temperature : int or float, optional, default 12
Air temperature in degrees C.
horizon : string, optional, default '+0:00'
arc degrees:arc minutes from geometrical horizon for sunrise and
sunset, e.g., horizon='+0:00' to use sun center crossing the
geometrical horizon to define sunrise and sunset,
horizon='-0:34' for when the sun's upper edge crosses the
geometrical horizon
xtol : float, optional, default 1.0e-12
The allowed error in the result from value
Returns
-------
datetime.datetime
Raises
------
ValueError
If the value is not contained between the bounds.
AttributeError
If the given attribute is not an attribute of a
PyEphem.Sun object.
"""
try:
import scipy.optimize as so
except ImportError:
raise ImportError('The calc_time function requires scipy')
obs, sun = _ephem_setup(latitude, longitude, altitude,
pressure, temperature, horizon)
def compute_attr(thetime, target, attr):
obs.date = thetime
sun.compute(obs)
return getattr(sun, attr) - target
lb = datetime_to_djd(lower_bound)
ub = datetime_to_djd(upper_bound)
djd_root = so.brentq(compute_attr, lb, ub,
(value, attribute), xtol=xtol)
return djd_to_datetime(djd_root)
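# calc_time sketch: solve for the time the sun reaches 30 degrees altitude
# (needs scipy and ephem; the bounds and location below are hypothetical, and
# the bounds must bracket a crossing of the target value for brentq to work):
#
#   t = calc_time(dt.datetime(2019, 6, 1, 13), dt.datetime(2019, 6, 1, 19),
#                 latitude=32.2, longitude=-110.9,
#                 attribute='alt', value=np.radians(30.))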
def pyephem_earthsun_distance(time):
"""
Calculates the distance from the earth to the sun using pyephem.
Parameters
----------
time : pd.DatetimeIndex
Returns
-------
pd.Series. Earth-sun distance in AU.
"""
import ephem
sun = ephem.Sun()
earthsun = []
for thetime in time:
sun.compute(ephem.Date(thetime))
earthsun.append(sun.earth_distance)
return pd.Series(earthsun, index=time)
def nrel_earthsun_distance(time, how='numpy', delta_t=67.0, numthreads=4):
"""
Calculates the distance from the earth to the sun using the
NREL SPA algorithm described in [1]_.
Parameters
----------
time : pd.DatetimeIndex
how : str, optional, default 'numpy'
Options are 'numpy' or 'numba'. If numba >= 0.17.0
is installed, how='numba' will compile the spa functions
to machine code and run them multithreaded.
delta_t : float, optional, default 67.0
If delta_t is None, uses spa.calculate_deltat
using time.year and time.month from pandas.DatetimeIndex.
        For most simulations specifying delta_t is sufficient.
Difference between terrestrial time and UT1.
*Note: delta_t = None will break code using nrel_numba,
this will be fixed in a future version.*
By default, use USNO historical data and predictions
numthreads : int, optional, default 4
Number of threads to use if how == 'numba'.
Returns
-------
dist : pd.Series
Earth-sun distance in AU.
References
----------
.. [1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar
radiation applications. Technical report: NREL/TP-560- 34302. Golden,
USA, http://www.nrel.gov.
"""
if not isinstance(time, pd.DatetimeIndex):
try:
time = pd.DatetimeIndex(time)
except (TypeError, ValueError):
time = pd.DatetimeIndex([time, ])
unixtime = np.array(time.astype(np.int64)/10**9)
spa = _spa_python_import(how)
delta_t = delta_t or spa.calculate_deltat(time.year, time.month)
dist = spa.earthsun_distance(unixtime, delta_t, numthreads)
dist = pd.Series(dist, index=time)
return dist
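# Earth-sun distance sketch (hypothetical dates):
#
#   months = pd.date_range('2019-01-01', '2019-12-31', freq='MS', tz='UTC')
#   dist_au = nrel_earthsun_distance(months)   # pd.Series of distances in AU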
def _calculate_simple_day_angle(dayofyear, offset=1):
"""
Calculates the day angle for the Earth's orbit around the Sun.
Parameters
----------
dayofyear : numeric
offset : int, default 1
For the Spencer method, offset=1; for the ASCE method, offset=0
Returns
-------
day_angle : numeric
"""
return (2. * np.pi / 365.) * (dayofyear - offset)
def equation_of_time_spencer71(dayofyear):
"""
Equation of time from Duffie & Beckman and attributed to Spencer
(1971) and Iqbal (1983).
The coefficients correspond to the online copy of the `Fourier
paper`_ [1]_ in the Sundial Mailing list that was posted in 1998 by
Mac Oglesby from his correspondence with Macquarie University Prof.
John Pickard who added the following note.
In the early 1970s, I contacted Dr Spencer about this method because I
was trying to use a hand calculator for calculating solar positions,
etc. He was extremely helpful and gave me a reprint of this paper. He
also pointed out an error in the original: in the series for E, the
constant was printed as 0.000075 rather than 0.0000075. I have
corrected the error in this version.
There appears to be another error in formula as printed in both
Duffie & Beckman's [2]_ and Frank Vignola's [3]_ books in which the
coefficient 0.04089 is printed instead of 0.040849, corresponding to
the value used in the Bird Clear Sky model implemented by Daryl
Myers [4]_ and printed in both the Fourier paper from the Sundial
Mailing List and R. Hulstrom's [5]_ book.
.. _Fourier paper: http://www.mail-archive.com/[email protected]/msg01050.html
Parameters
----------
dayofyear : numeric
Returns
-------
equation_of_time : numeric
Difference in time between solar time and mean solar time in minutes.
References
----------
.. [1] J. W. Spencer, "Fourier series representation of the position of the
sun" in Search 2 (5), p. 172 (1971)
.. [2] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal
Processes, 3rd Edition" pp. 9-11, J. Wiley and Sons, New York (2006)
.. [3] Frank Vignola et al., "Solar And Infrared Radiation Measurements",
p. 13, CRC Press (2012)
    .. [4] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable
    Energy Applications", p. 5 CRC Press (2013)
    .. [5] Roland Hulstrom, "Solar Resources" p. 66, MIT Press (1989)
See Also
--------
equation_of_time_pvcdrom
"""
day_angle = _calculate_simple_day_angle(dayofyear)
# convert from radians to minutes per day = 24[h/day] * 60[min/h] / 2 / pi
eot = (1440.0 / 2 / np.pi) * (
0.0000075 +
0.001868 * np.cos(day_angle) - 0.032077 * np.sin(day_angle) -
0.014615 * np.cos(2.0 * day_angle) - 0.040849 * np.sin(2.0 * day_angle)
)
return eot
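# Sketch: evaluate the Spencer (1971) equation of time over a year; the result
# varies within roughly +/- 16 minutes (dayofyear may be scalar or array-like):
#
#   doy = np.arange(1, 366)
#   eot_minutes = equation_of_time_spencer71(doy)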
def equation_of_time_pvcdrom(dayofyear):
"""
Equation of time from PVCDROM.
`PVCDROM`_ is a website by Solar Power Lab at Arizona State
University (ASU)
.. _PVCDROM: http://www.pveducation.org/pvcdrom/2-properties-sunlight/solar-time
Parameters
----------
dayofyear : numeric
Returns
-------
equation_of_time : numeric
Difference in time between solar time and mean solar time in minutes.
References
----------
[1] Soteris A. Kalogirou, "Solar Energy Engineering Processes and
    Systems, 2nd Edition" Elsevier/Academic Press (2009).
See Also
--------
    equation_of_time_spencer71
"""
# day angle relative to Vernal Equinox, typically March 22 (day number 81)
bday = \
_calculate_simple_day_angle(dayofyear) - (2.0 * np.pi / 365.0) * 80.0
# same value but about 2x faster than Spencer (1971)
return 9.87 * np.sin(2.0 * bday) - 7.53 * np.cos(bday) - 1.5 * np.sin(bday)
def declination_spencer71(dayofyear):
"""
Solar declination from Duffie & Beckman [1] and attributed to
Spencer (1971) and Iqbal (1983).
.. warning::
Return units are radians, not degrees.
Parameters
----------
dayofyear : numeric
Returns
-------
declination (radians) : numeric
Angular position of the sun at solar noon relative to the plane of the
equator, approximately between +/-23.45 (degrees).
References
----------
[1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal
Processes, 3rd Edition" pp. 13-14, J. Wiley and Sons, New York (2006)
[2] J. W. Spencer, "Fourier series representation of the position of the
sun" in Search 2 (5), p. 172 (1971)
[3] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable
Energy Applications", p. 4 CRC Press (2013)
See Also
--------
declination_cooper69
"""
day_angle = _calculate_simple_day_angle(dayofyear)
return (
0.006918 -
0.399912 * np.cos(day_angle) + 0.070257 * np.sin(day_angle) -
0.006758 * np.cos(2. * day_angle) + 0.000907 * np.sin(2. * day_angle) -
0.002697 * np.cos(3. * day_angle) + 0.00148 * np.sin(3. * day_angle)
)
def declination_cooper69(dayofyear):
"""
Solar declination from Duffie & Beckman [1] and attributed to Cooper (1969)
.. warning::
Return units are radians, not degrees.
Declination can be expressed using either sine or cosine:
.. math::
\\delta = 23.45 \\sin \\left( \\frac{2 \\pi}{365} \\left(n_{day} + 284
\\right) \\right) = -23.45 \\cos \\left( \\frac{2 \\pi}{365}
\\left(n_{day} + 10 \\right) \\right)
Parameters
----------
dayofyear : numeric
Returns
-------
declination (radians) : numeric
Angular position of the sun at solar noon relative to the plane of the
equator, approximately between +/-23.45 (degrees).
References
----------
[1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal
Processes, 3rd Edition" pp. 13-14, J. Wiley and Sons, New York (2006)
[2] J. H. Seinfeld and S. N. Pandis, "Atmospheric Chemistry and Physics"
p. 129, J. Wiley (1998)
[3] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable
Energy Applications", p. 4 CRC Press (2013)
See Also
--------
declination_spencer71
"""
day_angle = _calculate_simple_day_angle(dayofyear)
dec = np.deg2rad(23.45 * np.sin(day_angle + (2.0 * np.pi / 365.0) * 285.0))
return dec
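# Both declination models take day of year and return radians; a comparison
# sketch (the two curves track each other closely over the year):
#
#   doy = np.arange(1, 366)
#   dec_spencer_deg = np.degrees(declination_spencer71(doy))
#   dec_cooper_deg = np.degrees(declination_cooper69(doy))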
def solar_azimuth_analytical(latitude, hourangle, declination, zenith):
"""
Analytical expression of solar azimuth angle based on spherical
trigonometry.
Parameters
----------
latitude : numeric
Latitude of location in radians.
hourangle : numeric
Hour angle in the local solar time in radians.
declination : numeric
Declination of the sun in radians.
zenith : numeric
Solar zenith angle in radians.
Returns
-------
azimuth : numeric
Solar azimuth angle in radians.
References
----------
[1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal
Processes, 3rd Edition" pp. 14, J. Wiley and Sons, New York (2006)
[2] J. H. Seinfeld and S. N. Pandis, "Atmospheric Chemistry and Physics"
p. 132, J. Wiley (1998)
[3] `Wikipedia: Solar Azimuth Angle
<https://en.wikipedia.org/wiki/Solar_azimuth_angle>`_
[4] `PVCDROM: Azimuth Angle <http://www.pveducation.org/pvcdrom/2-
properties-sunlight/azimuth-angle>`_
See Also
--------
declination_spencer71
declination_cooper69
hour_angle
solar_zenith_analytical
"""
numer = (np.cos(zenith) * np.sin(latitude) - np.sin(declination))
denom = (np.sin(zenith) * np.cos(latitude))
# cases that would generate new NaN values are safely ignored here
# since they are dealt with further below
with np.errstate(invalid='ignore', divide='ignore'):
cos_azi = numer / denom
# when zero division occurs, use the limit value of the analytical
# expression
cos_azi = \
np.where(np.isclose(denom, 0.0, rtol=0.0, atol=1e-8), 1.0, cos_azi)
# when too many round-ups in floating point math take cos_azi beyond
# 1.0, use 1.0
cos_azi = \
np.where(np.isclose(cos_azi, 1.0, rtol=0.0, atol=1e-8), 1.0, cos_azi)
cos_azi = \
np.where(np.isclose(cos_azi, -1.0, rtol=0.0, atol=1e-8), -1.0, cos_azi)
# when NaN values occur in input, ignore and pass to output
with np.errstate(invalid='ignore'):
sign_ha = np.sign(hourangle)
return sign_ha * np.arccos(cos_azi) + np.pi
def solar_zenith_analytical(latitude, hourangle, declination):
"""
Analytical expression of solar zenith angle based on spherical
trigonometry.
.. warning:: The analytic form neglects the effect of atmospheric
refraction.
Parameters
----------
latitude : numeric
Latitude of location in radians.
hourangle : numeric
Hour angle in the local solar time in radians.
declination : numeric
Declination of the sun in radians.
Returns
-------
zenith : numeric
Solar zenith angle in radians.
References
----------
[1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal
Processes, 3rd Edition" pp. 14, J. Wiley and Sons, New York (2006)
[2] J. H. Seinfeld and S. N. Pandis, "Atmospheric Chemistry and
Physics" p. 132, J. Wiley (1998)
[3] Daryl R. Myers, "Solar Radiation: Practical Modeling for
Renewable Energy Applications", p. 5 CRC Press (2013)
`Wikipedia: Solar Zenith Angle
<https://en.wikipedia.org/wiki/Solar_zenith_angle>`_
`PVCDROM: Sun's Position
<http://www.pveducation.org/pvcdrom/2-properties-sunlight/suns-position>`_
See Also
--------
declination_spencer71
declination_cooper69
hour_angle
"""
return np.arccos(
np.cos(declination) * np.cos(latitude) * np.cos(hourangle) +
np.sin(declination) * np.sin(latitude)
)
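# Chaining the analytical helpers; all angles are in radians and the inputs
# below are hypothetical:
#
#   lat = np.radians(40.0)
#   dec = declination_spencer71(172)      # around the June solstice
#   ha = np.radians(15.0)                 # one hour after solar noon
#   zen = solar_zenith_analytical(lat, ha, dec)
#   azi = solar_azimuth_analytical(lat, ha, dec, zen)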
def hour_angle(times, longitude, equation_of_time):
"""
Hour angle in local solar time. Zero at local solar noon.
Parameters
----------
times : :class:`pandas.DatetimeIndex`
Corresponding timestamps, must be localized to the timezone for the
``longitude``.
longitude : numeric
Longitude in degrees
equation_of_time : numeric
Equation of time in minutes.
Returns
-------
hour_angle : numeric
Hour angle in local solar time in degrees.
References
----------
[1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal
Processes, 3rd Edition" pp. 13, J. Wiley and Sons, New York (2006)
[2] J. H. Seinfeld and S. N. Pandis, "Atmospheric Chemistry and Physics"
p. 132, J. Wiley (1998)
[3] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable
Energy Applications", p. 5 CRC Press (2013)
See Also
--------
    equation_of_time_spencer71
equation_of_time_pvcdrom
"""
naive_times = times.tz_localize(None) # naive but still localized
# hours - timezone = (times - normalized_times) - (naive_times - times)
hrs_minus_tzs = 1 / NS_PER_HR * (
2 * times.astype(np.int64) - times.normalize().astype(np.int64) -
naive_times.astype(np.int64))
# ensure array return instead of a version-dependent pandas <T>Index
return np.asarray(
15. * (hrs_minus_tzs - 12.) + longitude + equation_of_time / 4.)
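# hour_angle sketch: combine localized timestamps with an equation-of-time
# estimate in minutes (hypothetical inputs):
#
#   times = pd.date_range('2019-06-01 06:00', periods=12, freq='H',
#                         tz='Etc/GMT+7')
#   eot = equation_of_time_spencer71(times.dayofyear)
#   ha_deg = hour_angle(times, longitude=-110.9, equation_of_time=eot)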
def _hour_angle_to_hours(times, hourangle, longitude, equation_of_time):
"""converts hour angles in degrees to hours as a numpy array"""
naive_times = times.tz_localize(None) # naive but still localized
tzs = 1 / NS_PER_HR * (
naive_times.astype(np.int64) - times.astype(np.int64))
hours = (hourangle - longitude - equation_of_time / 4.) / 15. + 12. + tzs
return np.asarray(hours)
def _local_times_from_hours_since_midnight(times, hours):
"""
converts hours since midnight from an array of floats to localized times
"""
tz_info = times.tz # pytz timezone info
naive_times = times.tz_localize(None) # naive but still localized
# normalize local, naive times to previous midnight and add the hours until
# sunrise, sunset, and transit
return pd.DatetimeIndex(
(naive_times.normalize().astype(np.int64) +
(hours * NS_PER_HR).astype(np.int64)).astype('datetime64[ns]'),
tz=tz_info)
def _times_to_hours_after_local_midnight(times):
"""convert local pandas datetime indices to array of hours as floats"""
times = times.tz_localize(None)
hrs = 1 / NS_PER_HR * (
times.astype(np.int64) - times.normalize().astype(np.int64))
return np.array(hrs)
def sun_rise_set_transit_geometric(times, latitude, longitude, declination,
equation_of_time):
"""
Geometric calculation of solar sunrise, sunset, and transit.
.. warning:: The geometric calculation assumes a circular earth orbit with
the sun as a point source at its center, and neglects the effect of
atmospheric refraction on zenith. The error depends on location and
time of year but is of order 10 minutes.
Parameters
----------
times : pandas.DatetimeIndex
Corresponding timestamps, must be localized to the timezone for the
``latitude`` and ``longitude``.
latitude : float
Latitude in degrees, positive north of equator, negative to south
longitude : float
Longitude in degrees, positive east of prime meridian, negative to west
declination : numeric
declination angle in radians at ``times``
equation_of_time : numeric
difference in time between solar time and mean solar time in minutes
Returns
-------
sunrise : datetime
localized sunrise time
sunset : datetime
localized sunset time
transit : datetime
localized sun transit time
References
----------
[1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal
Processes, 3rd Edition," J. Wiley and Sons, New York (2006)
[2] Frank Vignola et al., "Solar And Infrared Radiation Measurements,"
CRC Press (2012)
"""
latitude_rad = np.radians(latitude) # radians
sunset_angle_rad = np.arccos(-np.tan(declination) * np.tan(latitude_rad))
sunset_angle = np.degrees(sunset_angle_rad) # degrees
# solar noon is at hour angle zero
# so sunrise is just negative of sunset
sunrise_angle = -sunset_angle
sunrise_hour = _hour_angle_to_hours(
times, sunrise_angle, longitude, equation_of_time)
sunset_hour = _hour_angle_to_hours(
times, sunset_angle, longitude, equation_of_time)
transit_hour = _hour_angle_to_hours(times, 0, longitude, equation_of_time)
sunrise = _local_times_from_hours_since_midnight(times, sunrise_hour)
sunset = _local_times_from_hours_since_midnight(times, sunset_hour)
transit = _local_times_from_hours_since_midnight(times, transit_hour)
return sunrise, sunset, transit
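# Geometric sunrise/sunset sketch (accuracy is of order 10 minutes, per the
# warning above; inputs hypothetical):
#
#   times = pd.DatetimeIndex(['2019-06-01'], tz='Etc/GMT+7')
#   decl = declination_spencer71(times.dayofyear)
#   eot = equation_of_time_spencer71(times.dayofyear)
#   sr, ss, tr = sun_rise_set_transit_geometric(times, 32.2, -110.9, decl, eot)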
| bsd-3-clause |
zfrenchee/pandas | pandas/io/json/normalize.py | 1 | 9164 | # ---------------------------------------------------------------------
# JSON normalization routines
import copy
from collections import defaultdict
import numpy as np
from pandas._libs.lib import convert_json_to_lines
from pandas import compat, DataFrame
def _convert_to_line_delimits(s):
"""Helper function that converts json lists to line delimited json."""
# Determine we have a JSON list to turn to lines otherwise just return the
# json object, only lists can
    if not (s[0] == '[' and s[-1] == ']'):
return s
s = s[1:-1]
return convert_json_to_lines(s)
def nested_to_record(ds, prefix="", sep=".", level=0):
"""a simplified json_normalize
converts a nested dict into a flat dict ("record"), unlike json_normalize,
it does not attempt to extract a subset of the data.
Parameters
----------
ds : dict or list of dicts
prefix: the prefix, optional, default: ""
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
    level: the number of levels in the json string, optional, default: 0
Returns
-------
d - dict or list of dicts, matching `ds`
Examples
--------
IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),
nested=dict(e=dict(c=1,d=2),d=2)))
Out[52]:
{'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
"""
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if not isinstance(k, compat.string_types):
k = str(k)
if level == 0:
newkey = k
else:
newkey = prefix + sep + k
            # only dicts get recursively flattened
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, sep, level + 1))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds
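# A small sketch of the sep parameter (hypothetical data):
#
#   nested_to_record({'flat': 1, 'nested': {'a': 2}}, sep='_')
#   # -> {'flat': 1, 'nested_a': 2}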
def json_normalize(data, record_path=None, meta=None,
meta_prefix=None,
record_prefix=None,
errors='raise',
sep='.'):
"""
"Normalize" semi-structured JSON data into a flat table
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
record_prefix : string, default None
        If given, prefix record fields with the dotted path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
meta_prefix : string, default None
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> from pandas.io.json import json_normalize
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
if isinstance(data, list) and not data:
return DataFrame()
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any(isinstance(x, dict) for x in compat.itervalues(data[0])):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
            # {VeryLong: {b: 1, c: 2}} -> {VeryLong.b: 1, VeryLong.c: 2}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data, sep=sep)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
meta = [m if isinstance(m, list) else [m] for m in meta]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
if not isinstance(sep, compat.string_types):
sep = str(sep)
meta_keys = [sep.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:],
seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == 'ignore':
meta_val = np.nan
else:
raise \
KeyError("Try running with "
"errors='ignore' as key "
"{err} is not always present"
.format(err=e))
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result.rename(columns=lambda x: record_prefix + x, inplace=True)
# Data types, a problem
for k, v in compat.iteritems(meta_vals):
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError('Conflicting metadata name {name}, '
'need distinguishing prefix '.format(name=k))
result[k] = np.array(v).repeat(lengths)
return result
| bsd-3-clause |
soulmachine/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 41 | 4827 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
derekjchow/models | research/tcn/dataset/webcam.py | 5 | 16225 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Collect images from multiple simultaneous webcams.
Usage:
1. Define some environment variables that describe what you're collecting.
dataset=your_dataset_name
mode=train
num_views=2
viddir=/tmp/tcn/videos
tmp_imagedir=/tmp/tcn/tmp_images
debug_vids=1
2. Run the script.
export DISPLAY=:0.0 && \
root=learning/brain/research/tcn && \
bazel build -c opt --copt=-mavx tcn/webcam && \
bazel-bin/tcn/webcam \
--dataset $dataset \
--mode $mode \
--num_views $num_views \
--tmp_imagedir $tmp_imagedir \
--viddir $viddir \
--debug_vids 1 \
--logtostderr
3. Hit Ctrl-C when done collecting, upon which the script will compile videos
for each view and optionally a debug video concatenating multiple
simultaneous views.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
from multiprocessing import Process
import os
import subprocess
import sys
import time
import cv2
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import animation # pylint: disable=g-import-not-at-top
import matplotlib.pyplot as plt
import numpy as np
from six.moves import input
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
tf.flags.DEFINE_string('dataset', '', 'Name of the dataset we`re collecting.')
tf.flags.DEFINE_string('mode', '',
                       'What type of data we`re collecting. E.g.: '
'`train`,`valid`,`test`, or `demo`')
tf.flags.DEFINE_string('seqname', '',
                       'Name of this sequence. If empty, the script will use '
                       'the name seq_N+1 where seq_N is the latest '
'integer-named sequence in the videos directory.')
tf.flags.DEFINE_integer('num_views', 2,
'Number of webcams.')
tf.flags.DEFINE_string('tmp_imagedir', '/tmp/tcn/data',
'Temporary outdir to write images.')
tf.flags.DEFINE_string('viddir', '/tmp/tcn/videos',
'Base directory to write debug videos.')
tf.flags.DEFINE_boolean('debug_vids', True,
                        'Whether to generate debug vids with multiple '
'concatenated views.')
tf.flags.DEFINE_string('debug_lhs_view', '0',
'Which viewpoint to use for the lhs video.')
tf.flags.DEFINE_string('debug_rhs_view', '1',
'Which viewpoint to use for the rhs video.')
tf.flags.DEFINE_integer('height', 1080, 'Raw input height.')
tf.flags.DEFINE_integer('width', 1920, 'Raw input width.')
tf.flags.DEFINE_string('webcam_ports', None,
'Comma-separated list of each webcam usb port.')
FLAGS = tf.app.flags.FLAGS
class ImageQueue(object):
"""An image queue holding each stream's most recent image.
Basically implements a process-safe collections.deque(maxlen=1).
"""
def __init__(self):
self.lock = multiprocessing.Lock()
self._queue = multiprocessing.Queue(maxsize=1)
def append(self, data):
with self.lock:
if self._queue.full():
# Pop the first element.
_ = self._queue.get()
self._queue.put(data)
def get(self):
with self.lock:
return self._queue.get()
def empty(self):
return self._queue.empty()
def close(self):
return self._queue.close()
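# ImageQueue usage sketch: a producer keeps only its most recent frame and a
# consumer polls for it (the frame variable below is hypothetical):
#
#   queue = ImageQueue()
#   queue.append(frame)        # producer side: replaces any stale frame
#   if not queue.empty():
#     latest = queue.get()     # consumer side: newest frame only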
class WebcamViewer(object):
"""A class which displays a live stream from the webcams."""
def __init__(self, display_queues):
"""Create a WebcamViewer instance."""
self.height = FLAGS.height
self.width = FLAGS.width
self.queues = display_queues
def _get_next_images(self):
"""Gets the next image to display."""
# Wait for one image per view.
not_found = True
while not_found:
if True in [q.empty() for q in self.queues]:
# At least one image queue is empty; wait.
continue
else:
# Retrieve the images.
latest = [q.get() for q in self.queues]
combined = np.concatenate(latest, axis=1)
not_found = False
return combined
def run(self):
"""Displays the Kcam live stream in a window.
This function blocks until the window is closed.
"""
fig, rgb_axis = plt.subplots()
image_rows = self.height
image_cols = self.width * FLAGS.num_views
initial_image = np.zeros((image_rows, image_cols, 3))
rgb_image = rgb_axis.imshow(initial_image, interpolation='nearest')
def update_figure(frame_index):
"""Animation function for matplotlib FuncAnimation. Updates the image.
Args:
frame_index: The frame number.
Returns:
An iterable of matplotlib drawables to clear.
"""
_ = frame_index
images = self._get_next_images()
images = images[..., [2, 1, 0]]
rgb_image.set_array(images)
return rgb_image,
# We must keep a reference to this animation in order for it to work.
unused_animation = animation.FuncAnimation(
fig, update_figure, interval=50, blit=True)
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
plt.show()
def reconcile(queues, write_queue):
"""Gets a list of concurrent images from each view queue.
This waits for latest images to be available in all view queues,
then continuously:
- Creates a list of current images for each view.
- Writes the list to a queue of image lists to write to disk.
Args:
queues: A list of `ImageQueues`, holding the latest image from each webcam.
write_queue: A multiprocessing.Queue holding lists of concurrent images.
"""
# Loop forever.
while True:
# Wait till all queues have an image.
if True in [q.empty() for q in queues]:
continue
else:
# Retrieve all views' images.
latest = [q.get() for q in queues]
# Copy the list of all concurrent images to the write queue.
write_queue.put(latest)
def persist(write_queue, view_dirs):
"""Pulls lists of concurrent images off a write queue, writes them to disk.
Args:
write_queue: A multiprocessing.Queue holding lists of concurrent images;
one image per view.
view_dirs: A list of strings, holding the output image directories for each
view.
"""
timestep = 0
while True:
# Wait till there is work in the queue.
if write_queue.empty():
continue
# Get a list of concurrent images to write to disk.
view_ims = write_queue.get()
for view_idx, image in enumerate(view_ims):
view_base = view_dirs[view_idx]
# Assign all concurrent view images the same sequence timestep.
fname = os.path.join(view_base, '%s.png' % str(timestep).zfill(10))
cv2.imwrite(fname, image)
# Move to the next timestep.
timestep += 1
def get_image(camera):
"""Captures a single image from the camera and returns it in PIL format."""
data = camera.read()
_, im = data
return im
def capture_webcam(camera, display_queue, reconcile_queue):
"""Captures images from simultaneous webcams, writes them to queues.
Args:
camera: A cv2.VideoCapture object representing an open webcam stream.
display_queue: An ImageQueue.
reconcile_queue: An ImageQueue.
"""
# Take some ramp images to allow cams to adjust for brightness etc.
for i in range(60):
tf.logging.info('Taking ramp image %d.' % i)
get_image(camera)
cnt = 0
start = time.time()
while True:
# Get images for all cameras.
im = get_image(camera)
# Replace the current image in the display and reconcile queues.
display_queue.append(im)
reconcile_queue.append(im)
cnt += 1
current = time.time()
if cnt % 100 == 0:
tf.logging.info('Collected %s of video, %d frames at ~%.2f fps.' % (
timer(start, current), cnt, cnt/(current-start)))
def timer(start, end):
"""Returns a formatted time elapsed."""
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
return '{:0>2}:{:0>2}:{:05.2f}'.format(int(hours), int(minutes), seconds)
def display_webcams(display_queues):
"""Builds an WebcamViewer to animate incoming images, runs it."""
viewer = WebcamViewer(display_queues)
viewer.run()
def create_vids(view_dirs, seqname):
"""Creates one video per view per sequence."""
vidbase = os.path.join(FLAGS.viddir, FLAGS.dataset, FLAGS.mode)
if not os.path.exists(vidbase):
os.makedirs(vidbase)
vidpaths = []
for idx, view_dir in enumerate(view_dirs):
vidname = os.path.join(vidbase, '%s_view%d.mp4' % (seqname, idx))
encode_vid_cmd = r'mencoder mf://%s/*.png \
-mf fps=29:type=png \
-ovc lavc -lavcopts vcodec=mpeg4:mbd=2:trell \
-oac copy -o %s' % (view_dir, vidname)
os.system(encode_vid_cmd)
vidpaths.append(vidname)
debugpath = None
if FLAGS.debug_vids:
    lhs = vidpaths[int(FLAGS.debug_lhs_view)]
    rhs = vidpaths[int(FLAGS.debug_rhs_view)]
debug_base = os.path.join('%s_debug' % FLAGS.viddir, FLAGS.dataset,
FLAGS.mode)
if not os.path.exists(debug_base):
os.makedirs(debug_base)
debugpath = '%s/%s.mp4' % (debug_base, seqname)
os.system(r"avconv \
-i %s \
-i %s \
-filter_complex '[0:v]pad=iw*2:ih[int];[int][1:v]overlay=W/2:0[vid]' \
-map [vid] \
-c:v libx264 \
-crf 23 \
-preset veryfast \
%s" % (lhs, rhs, debugpath))
return vidpaths, debugpath
def setup_paths():
"""Sets up the necessary paths to collect videos."""
assert FLAGS.dataset
assert FLAGS.mode
assert FLAGS.num_views
# Setup directory for final images used to create videos for this sequence.
tmp_imagedir = os.path.join(FLAGS.tmp_imagedir, FLAGS.dataset, FLAGS.mode)
if not os.path.exists(tmp_imagedir):
os.makedirs(tmp_imagedir)
# Create a base directory to hold all sequence videos if it doesn't exist.
vidbase = os.path.join(FLAGS.viddir, FLAGS.dataset, FLAGS.mode)
if not os.path.exists(vidbase):
os.makedirs(vidbase)
# Get one directory per concurrent view and a sequence name.
view_dirs, seqname = get_view_dirs(vidbase, tmp_imagedir)
# Get an output path to each view's video.
vid_paths = []
for idx, _ in enumerate(view_dirs):
vid_path = os.path.join(vidbase, '%s_view%d.mp4' % (seqname, idx))
vid_paths.append(vid_path)
# Optionally build paths to debug_videos.
debug_path = None
if FLAGS.debug_vids:
debug_base = os.path.join('%s_debug' % FLAGS.viddir, FLAGS.dataset,
FLAGS.mode)
if not os.path.exists(debug_base):
os.makedirs(debug_base)
debug_path = '%s/%s.mp4' % (debug_base, seqname)
return view_dirs, vid_paths, debug_path
def get_view_dirs(vidbase, tmp_imagedir):
"""Creates and returns one view directory per webcam."""
# Create and append a sequence name.
if FLAGS.seqname:
seqname = FLAGS.seqname
else:
    # If the video directory is empty, this is the first sequence.
if not os.listdir(vidbase):
seqname = '0'
else:
# Otherwise, get the latest sequence name and increment it.
seq_names = [i.split('_')[0] for i in os.listdir(vidbase)]
latest_seq = sorted(map(int, seq_names), reverse=True)[0]
seqname = str(latest_seq+1)
tf.logging.info('No seqname specified, using: %s' % seqname)
view_dirs = [os.path.join(
tmp_imagedir, '%s_view%d' % (seqname, v)) for v in range(FLAGS.num_views)]
for d in view_dirs:
if not os.path.exists(d):
os.makedirs(d)
return view_dirs, seqname
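# Illustrative sketch (hypothetical helper, kept separate from the code above):
# the sequence-naming rule used in get_view_dirs() in isolation. Video files are
# named '<seq>_view<idx>.mp4', and the next sequence is the largest <seq> + 1.
def _next_seqname(existing_filenames):
  if not existing_filenames:
    return '0'
  seq_names = [name.split('_')[0] for name in existing_filenames]
  return str(max(map(int, seq_names)) + 1)
# e.g. _next_seqname([]) -> '0'; _next_seqname(['0_view0.mp4', '1_view0.mp4']) -> '2'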
def get_cameras():
"""Opens cameras using cv2, ensures they can take images."""
# Try to get free webcam ports.
if FLAGS.webcam_ports:
ports = map(int, FLAGS.webcam_ports.split(','))
else:
ports = range(FLAGS.num_views)
cameras = [cv2.VideoCapture(i) for i in ports]
if not all([i.isOpened() for i in cameras]):
try:
# Try to find and kill hanging cv2 process_ids.
output = subprocess.check_output(['lsof -t /dev/video*'], shell=True)
tf.logging.info('Found hanging cv2 process_ids: \n')
tf.logging.info(output)
tf.logging.info('Killing hanging processes...')
for process_id in output.split('\n')[:-1]:
subprocess.call(['kill %s' % process_id], shell=True)
time.sleep(3)
# Recapture webcams.
cameras = [cv2.VideoCapture(i) for i in ports]
except subprocess.CalledProcessError:
raise ValueError(
'Cannot connect to cameras. Try running: \n'
'ls -ltrh /dev/video* \n '
'to see which ports your webcams are connected to. Then hand those '
'ports as a comma-separated list to --webcam_ports, e.g. '
'--webcam_ports 0,1')
# Verify each camera is able to capture images.
ims = map(get_image, cameras)
assert False not in [i is not None for i in ims]
return cameras
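# Illustrative sketch (hypothetical helper, assumes OpenCV is installed): the
# open-and-verify pattern from get_cameras() for a single port, without the
# hanging-process cleanup.
def _open_single_camera(port=0):
  cam = cv2.VideoCapture(port)
  if not cam.isOpened():
    raise ValueError('Could not open camera on port %d.' % port)
  if get_image(cam) is None:
    cam.release()
    raise ValueError('Camera on port %d opened but returned no frame.' % port)
  return cam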
def launch_images_to_videos(view_dirs, vid_paths, debug_path):
"""Launch job in separate process to convert images to videos."""
f = 'learning/brain/research/tcn/dataset/images_to_videos.py'
cmd = ['python %s ' % f]
cmd += ['--view_dirs %s ' % ','.join(i for i in view_dirs)]
cmd += ['--vid_paths %s ' % ','.join(i for i in vid_paths)]
cmd += ['--debug_path %s ' % debug_path]
cmd += ['--debug_lhs_view %s ' % FLAGS.debug_lhs_view]
cmd += ['--debug_rhs_view %s ' % FLAGS.debug_rhs_view]
cmd += [' & ']
cmd = ''.join(i for i in cmd)
# Call images_to_videos asynchronously.
fnull = open(os.devnull, 'w')
subprocess.Popen([cmd], stdout=fnull, stderr=subprocess.STDOUT, shell=True)
for p in vid_paths:
tf.logging.info('Writing final video to: %s' % p)
if debug_path:
tf.logging.info('Writing debug video to: %s' % debug_path)
def main(_):
# Initialize the camera capture objects.
cameras = get_cameras()
# Get one output directory per view.
view_dirs, vid_paths, debug_path = setup_paths()
try:
# Wait for user input.
try:
tf.logging.info('About to write to:')
for v in view_dirs:
tf.logging.info(v)
input('Press Enter to continue...')
except SyntaxError:
pass
# Create a queue per view for displaying and saving images.
display_queues = [ImageQueue() for _ in range(FLAGS.num_views)]
reconcile_queues = [ImageQueue() for _ in range(FLAGS.num_views)]
# Create a queue for collecting all tuples of multi-view images to write to
# disk.
write_queue = multiprocessing.Queue()
processes = []
# Create a process to display collected images in real time.
processes.append(Process(target=display_webcams, args=(display_queues,)))
# Create a process to collect the latest simultaneous images from each view.
processes.append(Process(
target=reconcile, args=(reconcile_queues, write_queue,)))
    # Create a process to write the collected image tuples to disk.
processes.append(Process(
target=persist, args=(write_queue, view_dirs,)))
for (cam, dq, rq) in zip(cameras, display_queues, reconcile_queues):
processes.append(Process(
target=capture_webcam, args=(cam, dq, rq,)))
for p in processes:
p.start()
for p in processes:
p.join()
except KeyboardInterrupt:
# Close the queues.
for q in display_queues + reconcile_queues:
q.close()
# Release the cameras.
for cam in cameras:
cam.release()
# Launch images_to_videos script asynchronously.
launch_images_to_videos(view_dirs, vid_paths, debug_path)
try:
sys.exit(0)
except SystemExit:
os._exit(0) # pylint: disable=protected-access
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
jason-neal/companion_simulations | simulators/bhm_module.py | 1 | 7262 | import logging
import os
import numpy as np
import pandas as pd
from logutils import BraceMessage as __
from tqdm import tqdm
import simulators
from mingle.models.broadcasted_models import one_comp_model
from mingle.utilities.chisqr import chi_squared
from mingle.utilities.phoenix_utils import generate_bhm_config_params
from mingle.utilities.phoenix_utils import load_starfish_spectrum, closest_model_params, generate_close_params
from mingle.utilities.xcorr import xcorr_peak
from simulators.common_setup import setup_dirs, sim_helper_function
from simulators.iam_module import arbitrary_minimums, arbitrary_rescale
from simulators.iam_module import renormalization
from numpy import float64, int64, ndarray
from typing import Dict, List, Optional, Tuple, Union
def setup_bhm_dirs(star: str) -> None:
setup_dirs(star, mode="bhm")
return None
def bhm_analysis(obs_spec, model_pars, gammas=None, errors=None, prefix=None, verbose=False, chip=None, norm=False,
wav_scale=True, norm_method="scalar"):
"""Run one component model over all parameter combinations in model_pars."""
# Gammas
if gammas is None:
gammas = np.array([0])
elif isinstance(gammas, (float, int)):
gammas = np.asarray(gammas, dtype=np.float32)
if isinstance(model_pars, list):
logging.debug(__("Number of close model_pars returned {0}", len(model_pars)))
# Solution Grids to return
model_chisqr_vals = np.empty(len(model_pars))
model_xcorr_vals = np.empty(len(model_pars))
model_xcorr_rv_vals = np.empty(len(model_pars))
bhm_grid_chisqr_vals = np.empty(len(model_pars))
bhm_grid_gamma = np.empty(len(model_pars))
full_bhm_grid_chisquare = np.empty((len(model_pars), len(gammas)))
normalization_limits = [2105, 2185] # small as possible?
for ii, params in enumerate(tqdm(model_pars)):
if prefix is None:
save_name = os.path.join(
simulators.paths["output_dir"], obs_spec.header["OBJECT"].upper(), "bhm",
"bhm_{0}_{1}_{3}_part{2}.csv".format(
obs_spec.header["OBJECT"].upper(), obs_spec.header["MJD-OBS"], ii, chip))
else:
save_name = os.path.join("{0}_part{1}.csv".format(prefix, ii))
if verbose:
print("Starting iteration with parameter:s\n{}".format(params))
mod_spec = load_starfish_spectrum(params, limits=normalization_limits, hdr=True,
normalize=True, wav_scale=wav_scale)
# Wavelength selection
mod_spec.wav_select(np.min(obs_spec.xaxis) - 5,
np.max(obs_spec.xaxis) + 5) # +- 5nm of obs
obs_spec = obs_spec.remove_nans()
# One component model with broadcasting over gammas
bhm_grid_func = one_comp_model(mod_spec.xaxis, mod_spec.flux, gammas=gammas)
bhm_grid_values = bhm_grid_func(obs_spec.xaxis)
assert ~np.any(np.isnan(obs_spec.flux)), "Observation is nan"
# RENORMALIZATION
if chip == 4:
# Quadratically renormalize anyway
obs_spec = renormalization(obs_spec, bhm_grid_values, normalize=True, method="quadratic")
obs_flux = renormalization(obs_spec, bhm_grid_values, normalize=norm, method=norm_method)
# Simple chi2
bhm_grid_chisquare_old = chi_squared(obs_flux, bhm_grid_values, error=errors)
# Applying arbitrary scalar normalization to continuum
bhm_norm_grid_values, arb_norm = arbitrary_rescale(bhm_grid_values,
*simulators.sim_grid["arb_norm"])
# Calculate Chi-squared
obs_flux = np.expand_dims(obs_flux, -1) # expand on last axis to match rescale
bhm_norm_grid_chisquare = chi_squared(obs_flux, bhm_norm_grid_values, error=errors)
# Take minimum chi-squared value along Arbitrary normalization axis
bhm_grid_chisquare, arbitrary_norms = arbitrary_minimums(bhm_norm_grid_chisquare, arb_norm)
assert np.any(
bhm_grid_chisquare_old >= bhm_grid_chisquare), "All chi2 values are not better or same with arbitrary scaling"
# Interpolate to obs
mod_spec.spline_interpolate_to(obs_spec)
org_model_chi_val = chi_squared(obs_spec.flux, mod_spec.flux)
model_chisqr_vals[ii] = org_model_chi_val # This is gamma = 0 version
# New parameters to explore
bhm_grid_chisqr_vals[ii] = bhm_grid_chisquare[np.argmin(bhm_grid_chisquare)]
bhm_grid_gamma[ii] = gammas[np.argmin(bhm_grid_chisquare)]
full_bhm_grid_chisquare[ii, :] = bhm_grid_chisquare
################
# Find cross correlation RV
# Should run though all models and find best rv to apply uniformly
rvoffset, cc_max = xcorr_peak(obs_spec, mod_spec, plot=False)
if verbose:
print("Cross correlation RV = {}".format(rvoffset))
print("Cross correlation max = {}".format(cc_max))
model_xcorr_vals[ii] = cc_max
model_xcorr_rv_vals[ii] = rvoffset
###################
npix = obs_flux.shape[0]
# print("bhm shape", bhm_grid_chisquare.shape)
save_full_bhm_chisqr(save_name, params, gammas, bhm_grid_chisquare, arbitrary_norms,
npix, rvoffset)
return (model_chisqr_vals, model_xcorr_vals, model_xcorr_rv_vals,
bhm_grid_chisqr_vals, bhm_grid_gamma, full_bhm_grid_chisquare)
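# Minimal numpy-only sketch of the broadcasting pattern used in bhm_analysis
# above (plain arrays stand in for Spectrum objects, and a simple
# (obs - model)**2 / model statistic stands in for the chi_squared helper).
def _broadcast_chi2_sketch():
    rng = np.random.RandomState(0)
    obs = rng.rand(50) + 1.0                     # observed flux (one pixel axis)
    models = rng.rand(50, 7) + 1.0               # one model column per gamma
    arb_norm = np.linspace(0.95, 1.05, 21)       # scalar continuum rescalings
    grid = models[:, :, np.newaxis] * arb_norm   # shape (50, 7, 21)
    chi2 = ((obs[:, np.newaxis, np.newaxis] - grid) ** 2 / grid).sum(axis=0)
    return chi2.min(axis=-1)                     # best chi2 per gamma, shape (7,)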
def save_full_bhm_chisqr(name: str, params1: List[Union[int, float]], gammas: ndarray, bhm_grid_chisquare: ndarray,
arbitrary_norms: ndarray, npix: int, xcorr_value: Optional[int] = None) -> None:
"""Save the bhm chisqr values to a cvs."""
assert gammas.shape == bhm_grid_chisquare.shape
data = {"gamma": gammas, "chi2": bhm_grid_chisquare.ravel(), "arbnorm": arbitrary_norms.ravel()}
df = pd.DataFrame(data=data)
df["teff_1"] = params1[0]
df["logg_1"] = params1[1]
df["feh_1"] = params1[2]
df["npix"] = npix
if xcorr_value is None:
xcorr_value = -9999999
df["xcorr"] = xcorr_value
columns = ["teff_1", "logg_1", "feh_1", "gamma", "npix", "chi2", "arbnorm", "xcorr"]
    df[columns].to_csv(name, sep=',', index=False, mode="a")  # Append to values csv
return None
def bhm_helper_function(star: str, obsnum: Union[int, str], chip: int, skip_params: bool = False) -> Tuple[
str, Dict[str, Union[str, float, List[Union[str, float]]]], str]:
return sim_helper_function(star, obsnum, chip, skip_params=skip_params, mode="bhm")
def get_bhm_model_pars(params: Dict[str, Union[int, float]], method: str = "close") -> List[
List[Union[int64, float64]]]:
method = method.lower()
host_params = [params["temp"], params["logg"], params["fe_h"]]
closest_host_model = closest_model_params(*host_params)
if method == "config":
model_pars = list(generate_bhm_config_params(closest_host_model))
elif method == "close":
# Model parameters to try iterate over.
model_pars = list(generate_close_params(closest_host_model, small=True))
else:
raise ValueError("The method '{0}' is not valid".format(method))
return model_pars
| mit |
TariqAHassan/BioVida | tests/images/biovida_images_tests.py | 1 | 2865 | """
BioVida-Images Subpackage Unit Testing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Note: 'medpix.png' is simply a blank image of the correct size.
# To test the OpeniImageProcessing() class, it will need to be replaced
# with an actual MedPix image.
import os
import sys
import unittest
import pandas as pd
from os.path import join as os_join
# Allow access to modules
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
from biovida import images
from biovida.support_tools.support_tools import items_null
from biovida.images._interface_support.openi.openi_text_processing import openi_raw_extract_and_clean
data_path = os_join(str(os.getcwd()).split("/tests")[0], "tests/images/data")
raw_openi_data_df = pd.read_pickle(os_join(data_path, "sample_records_raw.p"))
class OpeniInterfaceTests(unittest.TestCase):
"""
Unit Tests for the Images Subpackage.
"""
def test_cleaning_raw(self):
"""Test Extracting Features From Raw Open-i Data & Cleaning it."""
cleaned_df = openi_raw_extract_and_clean(raw_openi_data_df, clinical_cases_only=False,
verbose=False, cache_path=data_path)
# Tests for the newly generate columns
expected_new_columns = ('diagnosis', 'imaging_modality_from_text', 'sex',
'illness_duration_years', 'modality_full', 'image_problems_from_text',
'parsed_abstract', 'image_id_short', 'age', 'ethnicity', 'image_plane')
new_columns = set(cleaned_df.columns) - set(raw_openi_data_df.columns)
# - Number of new columns
self.assertEqual(len(new_columns) >= 11, True)
        # - Checks that at least all `expected_new_columns` columns are in `new_columns`,
# However, this will not fail if additional columns are added.
self.assertEqual(all(e in new_columns for e in expected_new_columns), True)
# Test for only floats
for c in ('illness_duration_years', 'age'):
float_test = all(isinstance(i, float) for i in cleaned_df[c])
self.assertEqual(float_test, True)
# Test for only strings
for c in ('diagnosis', 'imaging_modality_from_text', 'sex',
'modality_full', 'image_plane', 'image_id_short'):
string_test = all(isinstance(i, str) or items_null(i) for i in cleaned_df[c])
self.assertEqual(string_test, True)
# Test for only dictionaries
dict_test = all(isinstance(i, dict) or items_null(i) for i in cleaned_df['parsed_abstract'])
self.assertEqual(dict_test, True)
# Test for tuples
tuple_test = all(isinstance(i, tuple) or items_null(i) for i in cleaned_df['image_problems_from_text'])
self.assertEqual(tuple_test, True)
unittest.main()
| bsd-3-clause |
mikebenfield/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 50 | 2480 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
| bsd-3-clause |
ilo10/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 127 | 37672 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
                        err_msg="pred_%d doesn't match last pred_%d for loss %r and subsample %r. "
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
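# Why the equality above holds: the pinball/quantile loss is
#     L_alpha(r) = alpha * max(r, 0) + (1 - alpha) * max(-r, 0),
# so at alpha = 0.5 it reduces to |r| / 2, a positive multiple of the least
# absolute deviation loss; the fitted trees are therefore identical up to
# numerical differences, which decimal=4 allows for.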
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
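# Note: oob_improvement_[i] is the improvement in loss on the out-of-bag
# samples between stage i - 1 and stage i, so it is only defined when
# subsample < 1.0 (hence the AttributeError checked in the next test).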
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multilcass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
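# Illustrative sketch (hypothetical helper, not used by the tests below): a
# monitor with "patience" built on the same monitor(i, est, locals) callback
# signature as early_stopping_monitor; it stops once the training loss has
# failed to improve for `patience` consecutive stages.
def make_patience_monitor(patience=5):
    best = [np.inf]
    stale = [0]
    def monitor(i, est, locals_dict):
        if est.train_score_[i] < best[0]:
            best[0] = est.train_score_[i]
            stale[0] = 0
        else:
            stale[0] += 1
        return stale[0] >= patience
    return monitor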
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
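# Note: for the exponential (AdaBoost-like) loss the class-1 probability is
# recovered from the decision function F(x) via a logistic transform of 2*F(x),
#     P(y = 1 | x) = 1 / (1 + exp(-2 * F(x))),
# which is the identity checked against decision_function above.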
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(loss=loss, n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/tests/frame/test_alter_axes.py | 7 | 26538 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Index, MultiIndex,
RangeIndex)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameAlterAxes(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_set_index(self):
idx = Index(np.arange(len(self.mixed_frame)))
# cache it
_ = self.mixed_frame['foo'] # noqa
self.mixed_frame.index = idx
self.assertIs(self.mixed_frame['foo'].index, idx)
with assertRaisesRegexp(ValueError, 'Length mismatch'):
self.mixed_frame.index = idx[::2]
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame({'A': [1.1, 2.2, 3.3], 'B': [5.0, 6.1, 7.2]},
index=[2010, 2011, 2012])
expected = df.ix[2010]
new_index = df.index.astype(np.int32)
df.index = new_index
result = df.ix[2010]
assert_series_equal(result, expected)
def test_set_index2(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
# new object, single-column
result = df.set_index('C')
result_nodrop = df.set_index('C', drop=False)
index = Index(df['C'], name='C')
expected = df.ix[:, ['A', 'B', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
self.assertEqual(result.index.name, index.name)
# inplace, single
df2 = df.copy()
df2.set_index('C', inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index('C', drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# create new object, multi-column
result = df.set_index(['A', 'B'])
result_nodrop = df.set_index(['A', 'B'], drop=False)
index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])
expected = df.ix[:, ['C', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
self.assertEqual(result.index.names, index.names)
# inplace
df2 = df.copy()
df2.set_index(['A', 'B'], inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index(['A', 'B'], drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# corner case
with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
df.set_index('A', verify_integrity=True)
# append
result = df.set_index(['A', 'B'], append=True)
xp = df.reset_index().set_index(['index', 'A', 'B'])
xp.index.names = [None, 'A', 'B']
assert_frame_equal(result, xp)
# append to existing multiindex
rdf = df.set_index(['A'], append=True)
rdf = rdf.set_index(['B', 'C'], append=True)
expected = df.set_index(['A', 'B', 'C'], append=True)
assert_frame_equal(rdf, expected)
# Series
result = df.set_index(df.C)
self.assertEqual(result.index.name, 'C')
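    # Note: set_index(..., drop=False) keeps the indexed column(s) as regular
    # columns in addition to placing them in the index, which is what the
    # *_nodrop expectations above encode.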
def test_set_index_nonuniq(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
df.set_index('A', verify_integrity=True, inplace=True)
self.assertIn('A', df)
def test_set_index_bug(self):
# GH1590
df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
df2 = df.select(lambda indx: indx >= 1)
rs = df2.set_index('key')
xp = DataFrame({'val': [1, 2]},
Index(['b', 'c'], name='key'))
assert_frame_equal(rs, xp)
def test_set_index_pass_arrays(self):
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# multiple columns
result = df.set_index(['A', df['B'].values], drop=False)
expected = df.set_index(['A', 'B'], drop=False)
# TODO should set_index check_names ?
assert_frame_equal(result, expected, check_names=False)
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
# with Categorical
df = DataFrame({'A': np.random.randn(10),
'B': ci.values})
idf = df.set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
self.assertEqual(idf.index.name, 'B')
# from a CategoricalIndex
df = DataFrame({'A': np.random.randn(10),
'B': ci})
idf = df.set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
self.assertEqual(idf.index.name, 'B')
idf = df.set_index('B').reset_index().set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
self.assertEqual(idf.index.name, 'B')
new_df = idf.reset_index()
new_df.index = df.B
tm.assert_index_equal(new_df.index, ci, check_names=False)
self.assertEqual(idf.index.name, 'B')
def test_set_index_cast_datetimeindex(self):
df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)],
'B': np.random.randn(1000)})
idf = df.set_index('A')
tm.assertIsInstance(idf.index, pd.DatetimeIndex)
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
i = (pd.DatetimeIndex(
pd.tseries.tools.to_datetime(['2013-1-1 13:00',
'2013-1-2 14:00'], errors="raise"))
.tz_localize('US/Pacific'))
df = DataFrame(np.random.randn(2, 1), columns=['A'])
expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800',
tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800',
tz='US/Pacific')],
dtype="object"))
# convert index to series
result = Series(i)
assert_series_equal(result, expected)
        # assign to frame
df['B'] = i
result = df['B']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'B')
# keep the timezone
result = i.to_series(keep_tz=True)
assert_series_equal(result.reset_index(drop=True), expected)
# convert to utc
df['C'] = i.to_series().reset_index(drop=True)
result = df['C']
comp = pd.DatetimeIndex(expected.values).copy()
comp.tz = None
self.assert_numpy_array_equal(result.values, comp.values)
# list of datetimes with a tz
df['D'] = i.to_pydatetime()
result = df['D']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'D')
# GH 6785
# set the index manually
import pytz
df = DataFrame(
[{'ts': datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo': 1}])
expected = df.set_index('ts')
df.index = df['ts']
df.pop('ts')
assert_frame_equal(df, expected)
# GH 3950
# reset_index with single level
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range('1/1/2011', periods=5,
freq='D', tz=tz, name='idx')
df = pd.DataFrame(
{'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx': [datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5)],
'a': range(5),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx', 'a', 'b'])
expected['idx'] = expected['idx'].apply(
lambda d: pd.Timestamp(d, tz=tz))
assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
i = pd.to_datetime(["2014-01-01 10:10:10"],
utc=True).tz_convert('Europe/Rome')
df = DataFrame({'i': i})
self.assertEqual(df.set_index(i).index[0].hour, 11)
self.assertEqual(pd.DatetimeIndex(pd.Series(df.i))[0].hour, 11)
self.assertEqual(df.set_index(df.i).index[0].hour, 11)
def test_set_index_dst(self):
di = pd.date_range('2006-10-29 00:00:00', periods=3,
                           freq='H', tz='US/Pacific')
df = pd.DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=di).reset_index()
# single level
res = df.set_index('index')
exp = pd.DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=pd.Index(di, name='index'))
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(['index', 'a'])
exp_index = pd.MultiIndex.from_arrays([di, [0, 1, 2]],
names=['index', 'a'])
exp = pd.DataFrame({'b': [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
rs = df.set_index(df.columns[0])
xp = df.ix[:, 1:]
xp.index = df.ix[:, 0].values
xp.index.names = [df.columns[0]]
assert_frame_equal(rs, xp)
def test_set_index_empty_column(self):
# #1971
df = DataFrame([
dict(a=1, p=0),
dict(a=2, m=10),
dict(a=3, m=11, p=20),
dict(a=4, m=12, p=21)
], columns=('a', 'm', 'p', 'x'))
# it works!
result = df.set_index(['a', 'x'])
repr(result)
def test_set_columns(self):
cols = Index(np.arange(len(self.mixed_frame.columns)))
self.mixed_frame.columns = cols
with assertRaisesRegexp(ValueError, 'Length mismatch'):
self.mixed_frame.columns = cols[::2]
# Renaming
def test_rename(self):
mapping = {
'A': 'a',
'B': 'b',
'C': 'c',
'D': 'd'
}
renamed = self.frame.rename(columns=mapping)
renamed2 = self.frame.rename(columns=str.lower)
assert_frame_equal(renamed, renamed2)
assert_frame_equal(renamed2.rename(columns=str.upper),
self.frame, check_names=False)
# index
data = {
'A': {'foo': 0, 'bar': 1}
}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index, pd.Index(['foo', 'bar']))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, pd.Index(['BAR', 'FOO']))
# have to pass something
self.assertRaises(TypeError, self.frame.rename)
# partial columns
renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.columns,
pd.Index(['A', 'B', 'foo', 'bar']))
# other axis
renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.index,
pd.Index(['A', 'B', 'foo', 'bar']))
# index with name
index = Index(['foo', 'bar'], name='name')
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index,
pd.Index(['bar', 'foo'], name='name'))
self.assertEqual(renamed.index.name, renamer.index.name)
# MultiIndex
tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]
tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]
index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])
columns = MultiIndex.from_tuples(
tuples_columns, names=['fizz', 'buzz'])
renamer = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar3')],
names=['foo', 'bar'])
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
self.assert_index_equal(renamed.index, new_index)
self.assert_index_equal(renamed.columns, new_columns)
self.assertEqual(renamed.index.names, renamer.index.names)
self.assertEqual(renamed.columns.names, renamer.columns.names)
def test_rename_nocopy(self):
renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
self.assertTrue((self.frame['C'] == 1.).all())
def test_rename_inplace(self):
self.frame.rename(columns={'C': 'foo'})
self.assertIn('C', self.frame)
self.assertNotIn('foo', self.frame)
c_id = id(self.frame['C'])
frame = self.frame.copy()
frame.rename(columns={'C': 'foo'}, inplace=True)
self.assertNotIn('C', frame)
self.assertIn('foo', frame)
self.assertNotEqual(id(frame['foo']), c_id)
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ['foo', 'bar'], 1: ['bah', 'bas'], 2: [1, 2]})
df = df.rename(columns={0: 'a'})
df = df.rename(columns={1: 'b'})
df = df.set_index(['a', 'b'])
df.columns = ['2001-01-01']
expected = DataFrame([[1], [2]],
index=MultiIndex.from_tuples(
[('foo', 'bah'), ('bar', 'bas')],
names=['a', 'b']),
columns=['2001-01-01'])
assert_frame_equal(df, expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(['L0', 'L1', 'L2'])
assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels(['L0', 'L0', 'L0'])
assert_frame_equal(result, expected)
def test_reset_index(self):
stacked = self.frame.stack()[::2]
stacked = DataFrame({'foo': stacked, 'bar': stacked})
names = ['first', 'second']
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, lab) in enumerate(zip(stacked.index.levels,
stacked.index.labels)):
values = lev.take(lab)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(deleveled['first'], deleveled2['level_0'],
check_names=False)
tm.assert_series_equal(deleveled['second'], deleveled2['level_1'],
check_names=False)
# default name assigned
rdf = self.frame.reset_index()
exp = pd.Series(self.frame.index.values, name='index')
self.assert_series_equal(rdf['index'], exp)
# default name assigned, corner case
df = self.frame.copy()
df['index'] = 'foo'
rdf = df.reset_index()
exp = pd.Series(self.frame.index.values, name='level_0')
self.assert_series_equal(rdf['level_0'], exp)
# but this is ok
self.frame.index.name = 'index'
deleveled = self.frame.reset_index()
self.assert_series_equal(deleveled['index'],
pd.Series(self.frame.index))
self.assert_index_equal(deleveled.index,
pd.Index(np.arange(len(deleveled))))
# preserve column names
self.frame.columns.name = 'columns'
resetted = self.frame.reset_index()
self.assertEqual(resetted.columns.name, 'columns')
# only remove certain columns
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index(['A', 'B'])
# TODO should reset_index check_names ?
assert_frame_equal(rs, self.frame, check_names=False)
rs = frame.reset_index(['index', 'A', 'B'])
assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
rs = frame.reset_index(['index', 'A', 'B'])
assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
rs = frame.reset_index('A')
xp = self.frame.reset_index().set_index(['index', 'B'])
assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = self.frame.copy()
resetted = self.frame.reset_index()
df.reset_index(inplace=True)
assert_frame_equal(df, resetted, check_names=False)
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index('A', drop=True)
xp = self.frame.copy()
del xp['A']
xp = xp.set_index(['B'], append=True)
assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series((9.81 * time ** 2) / 2,
index=Index(time, name='time'),
name='speed')
df = DataFrame(s1)
resetted = s1.reset_index()
self.assertEqual(resetted['time'].dtype, np.float64)
resetted = df.reset_index()
self.assertEqual(resetted['time'].dtype, np.float64)
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ['x', 'y', 'z']
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(vals, Index(idx, name='a'),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index()
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill='blah')
xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
df = DataFrame(vals,
MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],
names=['d', 'a']),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index('a', )
xp = DataFrame(full, Index([0, 1, 2], name='d'),
columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill=None)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill='blah', col_level=1)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = pd.DataFrame([[1, 2], [3, 4]],
columns=pd.date_range('1/1/2013', '1/2/2013'),
index=['A', 'B'])
result = df.reset_index()
expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],
columns=['index', datetime(2013, 1, 1),
datetime(2013, 1, 2)])
assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = pd.DataFrame([[0, 0], [1, 1]], columns=['A', 'B'],
index=RangeIndex(stop=2))
result = df.reset_index()
tm.assertIsInstance(result.index, RangeIndex)
expected = pd.DataFrame([[0, 0, 0], [1, 1, 1]],
columns=['index', 'A', 'B'],
index=RangeIndex(stop=2))
assert_frame_equal(result, expected)
def test_set_index_names(self):
df = pd.util.testing.makeDataFrame()
df.index.name = 'name'
self.assertEqual(df.set_index(df.index).index.names, ['name'])
mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])
mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,
names=['A', 'B', 'A', 'B'])
df = df.set_index(['A', 'B'])
self.assertEqual(df.set_index(df.index).index.names, ['A', 'B'])
# Check that set_index isn't converting a MultiIndex into an Index
self.assertTrue(isinstance(df.set_index(df.index).index, MultiIndex))
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
self.assertTrue(isinstance(df.set_index(
[df.index, df.index]).index, MultiIndex))
# Check equality
tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)
def test_rename_objects(self):
renamed = self.mixed_frame.rename(columns=str.upper)
self.assertIn('FOO', renamed)
self.assertNotIn('foo', renamed)
def test_assign_columns(self):
self.frame['hi'] = 'there'
frame = self.frame.copy()
frame.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']
assert_series_equal(self.frame['C'], frame['baz'], check_names=False)
assert_series_equal(self.frame['hi'], frame['foo2'], check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame({'A': [1, 2, 1, 1, 2],
'B': [10, 16, 22, 28, 34],
'C1': pd.Categorical(list("abaab"),
categories=list("bac"),
ordered=False),
'C2': pd.Categorical(list("abaab"),
categories=list("bac"),
ordered=True)})
for cols in ['C1', 'C2', ['A', 'C1'], ['A', 'C2'], ['C1', 'C2']]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
| mit |
cod3monk/kerncraft | kerncraft/roofline-plot.py | 2 | 2490 | #!/usr/bin/env python3
from pprint import pprint
import matplotlib.pyplot as plt
from ruamel import yaml
from .prefixedunit import PrefixedUnit
def frange(start, stop, step=1.0):
f = start
while f < stop:
f += step
yield f
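# Hedged helper sketch (added for illustration, not part of the original script):
# the roofline ceiling plotted further below is simply
# min(peak FLOP/s, bandwidth * arithmetic intensity); the function name here is
# an assumption made for this example only and it is never called.
def roofline_limit(intensity_flop_per_byte, bandwidth_byte_per_s, peak_flop_per_s):
    """Return the attainable performance [FLOP/s] at a given arithmetic intensity."""
    return min(bandwidth_byte_per_s * intensity_flop_per_byte, peak_flop_per_s)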
# Input (usually from ECM model)
result = {
'min performance': 11175000000.0, 'bottleneck level': 2,
'mem bottlenecks': [{'performance': PrefixedUnit(24474545454.545452, '', 'FLOP/s'),
'bandwidth': PrefixedUnit(89.74, u'G', u'B/s'),
'arithmetic intensity': 0.2727272727272727,
'bw kernel': 'triad', 'level': 'L1-L2'},
{'performance': PrefixedUnit(12957000000.0, '',
'FLOP/s'), 'bandwidth': PrefixedUnit(43.19, u'G', u'B/s'),
'arithmetic intensity': 0.3, 'bw kernel': 'triad', 'level': 'L2-L3'},
{'performance': PrefixedUnit(11175000000.0, '', 'FLOP/s'),
'bandwidth': PrefixedUnit(22.35, u'G', u'B/s'),
'arithmetic intensity': 0.5, 'bw kernel': 'triad', 'level': 'L3-MEM'}]}
machine = yaml.load(open('machine-files/emmy.yaml'))
max_flops = machine['clock']*sum(machine['FLOPs per cycle']['DP'].values())
max_flops.unit = "FLOP/s"
pprint(result)
pprint(max_flops)
# Plot configuration
height = 0.8
fig = plt.figure(frameon=False)
ax = fig.add_subplot(1, 1, 1)
yticks_labels = []
yticks = []
xticks_labels = []
xticks = [2.**i for i in range(-4, 4)]
ax.set_xlabel('arithmetic intensity [FLOP/byte]')
ax.set_ylabel('performance [FLOP/s]')
# Upper bound
x = list(frange(min(xticks), max(xticks), 0.01))
bw = float(result['mem bottlenecks'][result['bottleneck level']]['bandwidth'])
ax.plot(x, [min(bw*x, float(max_flops)) for x in x])
# Code location
perf = min(
float(max_flops),
float(result['mem bottlenecks'][result['bottleneck level']]['performance']))
arith_intensity = result['mem bottlenecks'][result['bottleneck level']]['arithmetic intensity']
ax.plot(arith_intensity, perf, 'r+', markersize=12, markeredgewidth=4)
# ax.tick_params(axis='y', which='both', left='off', right='off')
# ax.tick_params(axis='x', which='both', top='off')
ax.set_xscale('log', basex=2)
ax.set_yscale('log')
ax.set_xlim(min(xticks), max(xticks))
# ax.set_yticks([perf, float(max_flops)])
ax.set_xticks(xticks+[arith_intensity])
ax.grid(axis='x', alpha=0.7, linestyle='--')
# fig.savefig('out.pdf')
plt.show()
| agpl-3.0 |
CentralLabFacilities/m3meka | python/scripts/tools/m3qa_preisach.py | 2 | 7452 | #! /usr/bin/python
import time
import m3.toolbox as m3t
import Numeric as nu
import math
import yaml
import m3.rt_proxy as m3p
import m3.toolbox as m3t
import m3.component_factory as mcf
#Image stuff
from PIL import Image
from PIL import ImageDraw
import sys
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import os, sys
import Tkinter
import Image, ImageTk
import random
from m3qa.calibrate_actuator_ec_r3 import dpm3_thread
# ####################################################
#L+1: number of levels in grid dimension
L=50
#drawing width
dw=10
# ####################################################
import pygame
import Image
from pygame.locals import *
import sys
pygame.init()
window = pygame.display.set_mode((L*dw,L*dw))
screen = pygame.display.get_surface()
events = pygame.event.get()
im = Image.new("RGB", (L*dw,L*dw), (0,0,0))
draw = ImageDraw.Draw(im)
def draw_plane(u):
for i in range(L,0,-1):
for j in range(1,i+1):
draw_cell(i,j,u)
pg_img = pygame.image.frombuffer(im.tostring(), im.size, im.mode)
screen.blit(pg_img, (0,0))
pygame.display.flip()
def in_cell(i,j,u):
k=cell_to_idx(i,j)
return(u>a[k]-cw and u<a[k]) and (u>b[k] and u<b[k]+cw)
def draw_cell(i,j,u):
cy=L*dw-i*dw-dw/2.0
cx=j*dw-dw/2.0
k=cell_to_idx(i,j)
if in_cell(i,j,u):
draw.rectangle(((cx-dw/2.0)+1,(cy-dw/2.0)+1,(cx+dw/2.0)-1,(cy+dw/2.0)-1),fill=(0,0,255),outline=(255,255,0))
elif g[k]>0:
draw.rectangle(((cx-dw/2.0)+1,(cy-dw/2.0)+1,(cx+dw/2.0)-1,(cy+dw/2.0)-1),fill=(0,0,0),outline=(255,255,0))
else:
draw.rectangle((cx-dw/2.0,cy-dw/2.0,cx+dw/2.0,cy+dw/2.0),fill=(0,0,0),outline=(255,0,0))
# ####################################################
#u : input variable
#v: output variable
#a>b by definition
#mu(b,a)=0 if b<bo or a>ao (outer bounds of Preisach plane)
#w(t): Preisach operator (omega): integral over plane = sum(g(a,b)==1)-sum(g(a,b)==-1)
#K: number of elements in column vector representation
K=int(L*(L+1)/2.0)
#a: upper bound (alpha)
a=nu.array([0.0]*K)
#b: lower bound (beta)
b=nu.array([0.0]*K)
#mu: density function mu(b,a)
mu=nu.array([0.0]*K)
#g: Preisach hysteron (gamma): v=g_op(b,a,u)=+1 if u>a, -1 if u<b, g if b<=u<=a
#Start in full negative state: u(t<0)<b0
g=nu.array([-1.0]*K)
#a0: maximum input value
#a0=9
#b0: minimum input value
#b0=1
u_max=10000
u_min=-10000
#Cell width
cw=(u_max-u_min)/L
print 'Cell width is',cw,'mNm'
#un: input sequence of n discretized inputs
#n=?
def build():
for i in range(L,0,-1):
for j in range(1,i+1):
k=cell_to_idx(i,j)
alpha=u_min+i*cw
beta=u_min+(j-1)*cw
a[k]=alpha
b[k]=beta
#print 'i',i,'j',j,'a',alpha,'b',beta
def discretize(u):
if u<=u_min:
return 1
if u>=u_max:
return L
return math.ceil((u-u_min)/cw)
def print_plane():
for i in range(L,0,-1):
for j in range(1,i+1):
v=g[cell_to_idx(i,j)]
k=cell_to_idx(i,j)
print '(',v,')',
#print '(',b[k],a[k],')',
#print '(',i,',',j,')', #row, col
print
def cell_to_idx(i,j):
return int(i*(i+1)/2-(i-j+1))
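# Hedged sketch (illustration only, not used by the script): cell_to_idx flattens
# the lower-triangular Preisach plane row by row, so the first rows map as
# (1,1)->0, (2,1)->1, (2,2)->2, (3,1)->3, (3,2)->4, (3,3)->5 and the total
# number of cells is K = L*(L+1)/2.
def _check_cell_to_idx():
    expected = {(1, 1): 0, (2, 1): 1, (2, 2): 2, (3, 1): 3, (3, 2): 4, (3, 3): 5}
    for (i, j), k in expected.items():
        assert cell_to_idx(i, j) == k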
#Hysteron function: input, beta, alpha, and current
#Made inclusive of boundaries. Doesn't follow convention
#But otherwise plane borders are ignored. What is the correct way to handle this?
def g_op(u,bb,aa,cc):
if u>u_max or u<u_min:
return 0.0
if u>=aa:
return 1.0
if u<=bb:
return -1.0
return cc
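# Hedged demo (illustration only, never called): a single relay with beta=-1 and
# alpha=+1 latches at +/-1 and keeps its previous state while the input stays
# inside the loop, which is the classic Preisach hysteron behaviour.
def _demo_relay():
    state = -1.0
    for u in [0.0, 2.0, 0.0, -2.0, 0.0]:
        state = g_op(u, -1.0, 1.0, state)
    return state  # -1.0 after the final downward excursion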
def step_input(u):
for k in range(K):
y=g_op(u,b[k],a[k],g[k])
g[k]=y
slew=m3t.M3Slew()
def ramp_to_torque(des,scope):
act.set_mode_torque()
if slew.val==0:
slew.val=act.get_torque_mNm()
rate=500.0#100mNm/step, ~30steps/s, 3Nm/s, ~7s full range
x=slew.step(des,rate)
ut=[]
wt=[]
gt=[]
while True:
act.set_torque_mNm(x)
proxy.step()
time.sleep(0.03)
ut.append(act.get_torque_mNm())
wt.append(dpm3.get_load_mNm())
step_input(ut[-1])
draw_plane(u)
#gt.append(list(g))
scope.plot(ut[-1],x)
x=slew.step(des,rate)
if x==des: #Wait to settle
for i in range(20):
proxy.step()
time.sleep(0.1)
x=slew.step(des,rate)
ut.append(act.get_torque_mNm())
wt.append(dpm3.get_load_mNm())
step_input(ut[-1])
draw_plane(u)
#gt.append(list(g))
scope.plot(ut[-1],x)
break
#print x,des,ut[-1]
#act.set_mode_off()
proxy.step()
return ut,wt,gt
def triangle_wave():
scope=m3t.M3Scope2(xwidth=100,yrange=None)
des=[]
cc=1.0
scale=5000
for i in range(10):
des.append(cc)
cc=cc*-1.0
print 'Enable power. Hit enter to continue'
raw_input()
for ii in range(len(des)):
print ii,'Des: ',des[ii]
ramp_to_torque(des[ii]*scale,scope)
def fill_cells_random():
scope=m3t.M3Scope2(xwidth=100,yrange=None)
ns=30#5#K*2
#Build desired vector
log={'ns':ns,'type':'fill_cells_random',
'actuator':'MA12J1','L':L,'K':K,'CW':cw,'a':list(a),'b':list(b),'u_max':u_max,'u_min':u_min,'ut':[],'wt':[],'gt':[],'des':[]}
des=[u_min]
for nn in range(ns):
while True:
x=u_min+(u_max-u_min)*(random.random()) #randomly in range
if abs(des[-1]-x)>5*cw: #make excursion at least 5 cells
des.append(x)
break
log['des']=des
print 'Des',des
print 'Enable power. Hit enter to continue'
raw_input()
for ii in range(len(des)):
print ii,'Des: ',des[ii]
uu,ww,gg=ramp_to_torque(des[ii],scope)
print len(uu),len(ww),len(gg)
log['ut'].append(list(uu)) #input
log['wt'].append(list(ww)) #output
log['gt'].append(list(gg)) #hysteron states
print 'Save data [y]?'
if m3t.get_yes_no('y'):
fn=m3t.get_m3_config_path()+'data/preisach_data_random_ma12j1_'+m3t.time_string()+'.yml'
print 'Enter annotation string [None]'
note=m3t.get_string('None')
log['note']=note
f=file(fn,'w')
print 'Saving...',fn
f.write(yaml.safe_dump(log, default_flow_style=False,width=200))
f.close()
return log
def fill_cells_methodical():
#fill by row, for each row-alpha, starting at u_max
#reduce torque to u_min, then increase to row-alpha
#then reduce to column-beta, from alpha down to u_min
#this will fill an entire row. reduce the row index and repeat
for i in range(L,0,-1): #row
ramp_to_torque(u_min)
        alpha=a[cell_to_idx(i, 1)]  # row alpha is the same for every j in row i
for j in range(L,0,-1):
            beta=b[cell_to_idx(i, j)]
des=beta+cw/2.0 #servo to center of cell
ramp_to_torque(des)
u=act.get_torque_mNm()
w=dpm3.get_load_mNm()
ut.append(u)
wt.append(w)
gt.append(list(g))
build()
#print_plane()
draw_plane(0)
dpm3=dpm3_thread()
dpm3.start()
proxy = m3p.M3RtProxy()
proxy.start()
proxy.make_operational_all()
act=mcf.create_component('m3joint_ma12_j1')
pwr=mcf.create_component('m3pwr_pwr015')
proxy.publish_command(pwr)
proxy.subscribe_status(act)
proxy.publish_command(act)
proxy.publish_param(act)
proxy.step()
pwr.set_motor_power_on()
dd=100.0
u=u_min
fill_cells_random()
#triangle_wave()
#try:
#while True:
#print '---------------------'
#print 'f: free-run'
#print 'c: collect-data'
#c=m3t.get_keystroke()
#if c=='f':
#pass
#if c=='c':
#pass
#u=act.get_torque_mNm()
#lmNm=dpm3.get_load_mNm()
#print 'tq: ',u
#proxy.step()
#step_input(u)
#draw_plane(u)
#time.sleep(0.03)
#except (KeyboardInterrupt,EOFError):
#pass
pwr.set_motor_power_off()
dpm3.stop()
proxy.step()
proxy.stop() | mit |
emmatoday/PyClimateGraphs | SeaIce_Combined.py | 1 | 19763 | #!/usr/bin/env python3
"""
Global Sea Ice Extent Graph for 1979-Current
Website : https://github.com/emmatoday/PyClimateGraphs
Author : Emma M - GitHub: @emmatoday
Date : 15 February 2017
This code downloads and renders a current graph of the global sea ice
extent, covering 1979 through yesterday (subject to the availability of the
data from the NSIDC).
Requisite data files (in case you need to manually download them):
ftp://sidads.colorado.edu:21/DATASETS/NOAA/G02135/south/daily/data/S_seaice_extent_daily_v2.1.csv
ftp://sidads.colorado.edu:21/DATASETS/NOAA/G02135/north/daily/data/N_seaice_extent_daily_v2.1.csv
"""
# Load all needed libraries
from __future__ import unicode_literals
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.dates as mdates
import matplotlib.colors as c
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from scipy import stats
import datetime as dt
import os
import math
import calendar
import urllib
import shutil
import sys
import logging
from pprint import pprint
# Set up some constants
DATA_PATH = './data' # Path to data directory
OUTPUT_PATH = './output' # Path to output plot images
OUTDATED_DAYS = 3 # Days after which data is considered outdated
PRE_YEAR = 2010 # Year before which the "pre" data is taken
ROLLING_WINDOW = 7
# URLs where we can fetch the data files
DATA_URLS = {
's': 'ftp://sidads.colorado.edu:21/DATASETS/NOAA/G02135/south/daily/data/'
'S_seaice_extent_daily_v2.1.csv',
'n': 'ftp://sidads.colorado.edu:21/DATASETS/NOAA/G02135/north/daily/data/'
'N_seaice_extent_daily_v2.1.csv'
}
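# Illustrative note (comment only, nothing executed here): the functions below
# repeatedly derive the local CSV name from each FTP URL, e.g. for DATA_URLS['n']
#   os.path.split(urllib.parse.urlsplit(DATA_URLS['n']).path)[-1]
# evaluates to 'N_seaice_extent_daily_v2.1.csv'.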
def mkdir_if_necessary(dir):
"""
    Check if the necessary directories exist, creating them if necessary.
"""
try:
os.stat(dir)
except OSError:
os.mkdir(dir)
def data_files_exist():
"""
    Check whether all of the required data files already exist on disk.
    """
    for key, url in DATA_URLS.items():
        filename = os.path.split(urllib.parse.urlsplit(url).path)[-1]
        if not os.path.isfile(os.path.join(DATA_PATH, filename)):
            return False
    return True
def load_data_files():
"""
Load the data from disk and return the dataframes.
"""
sea_ice_indexes = {}
for key, url in DATA_URLS.items():
filename = os.path.split(urllib.parse.urlsplit(url).path)[-1]
sea_ice_indexes[key] = \
pd.read_csv(os.path.join(DATA_PATH, filename), skiprows=[1])
for key in sea_ice_indexes.keys():
sea_ice_indexes[key].rename(columns=lambda x: x.strip(), inplace=True)
sea_ice_indexes[key]['Date'] = sea_ice_indexes[key]\
.apply(lambda row: dt.date(row['Year'],
row['Month'],
row['Day']), axis=1)
sea_ice_indexes[key]['Date'] = \
pd.to_datetime(sea_ice_indexes[key]['Date'])
minday = np.min(sea_ice_indexes[key]['Date'])
maxday = np.max(sea_ice_indexes[key]['Date'])
newframe = {'Date': pd.Series(pd.date_range(minday, maxday).tolist())}
date_df = pd.DataFrame(newframe)
sea_ice_indexes[key] = pd.merge(left=date_df,
right=sea_ice_indexes[key],
on='Date', how='left')
sea_ice_indexes[key]['Day of Year'] = \
sea_ice_indexes[key] \
.apply(lambda row: row['Date'].timetuple().tm_yday, axis=1)
sea_ice_indexes[key]['Year'] = \
sea_ice_indexes[key] \
.apply(lambda row: row['Date'].year, axis=1)
sea_ice_indexes[key]['Month'] = \
sea_ice_indexes[key] \
.apply(lambda row: row['Date'].month, axis=1)
sea_ice_indexes[key]['Day'] = \
sea_ice_indexes[key] \
.apply(lambda row: row['Date'].day, axis=1)
sea_ice_indexes[key]['Extent'] = \
sea_ice_indexes[key]['Extent'].rolling(window=ROLLING_WINDOW,
center=False).mean()
sea_ice_indexes['s'].rename(columns={'Extent': 'S Extent'}, inplace=True)
sea_ice_indexes['n'].rename(columns={'Extent': 'N Extent'}, inplace=True)
return sea_ice_indexes
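# Hedged illustration of the smoothing applied above: a trailing (center=False)
# rolling mean over ROLLING_WINDOW days, e.g.
#   pd.Series([1., 2., 3., 4., 5.]).rolling(window=3, center=False).mean()
# gives [NaN, NaN, 2.0, 3.0, 4.0], so the first window-1 values are NaN.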
def data_is_fresh(sea_ice_indexes, outdated_days=OUTDATED_DAYS):
"""
    Check whether every dataset is recent enough to count as up to date.
    """
    for key, sea_ice_index in sea_ice_indexes.items():
        age_days = (dt.datetime.now() - sea_ice_index['Date'].iloc[-1]).days
        print('Data is {0} day(s) old'.format(age_days))
        if age_days >= outdated_days:
            return False
    return True
def refresh_data_files():
"""
Update datafiles to the latest available from the data URLs.
"""
for key, url in DATA_URLS.items():
url_path = urllib.parse.urlsplit(url).path
url_filename = os.path.split(url_path)[-1]
filename = os.path.join(DATA_PATH, url_filename)
with urllib.request.urlopen(url) as response:
with open(filename, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
def prep_data_files():
"""
Prepare and load the data files downloading and updating as necessary.
"""
mkdir_if_necessary(DATA_PATH)
mkdir_if_necessary(OUTPUT_PATH)
if data_files_exist():
print('Data files exist')
sea_ice_indexes = load_data_files()
if data_is_fresh(sea_ice_indexes):
print('Data files are up to date')
else:
print('Data files are outdated')
refresh_data_files()
sea_ice_indexes = load_data_files()
print('Data files have been updated')
else:
print('No data files found')
refresh_data_files()
sea_ice_indexes = load_data_files()
print('Data files have been downloaded')
return sea_ice_indexes
def running_mean(x, N=2):
return np.convolve(x, np.ones((N,))/N)[(N-1):]
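# Hedged example (the helper above is defined but not called in this script):
#   running_mean(np.array([1., 2., 3., 4.]), N=2) -> array([1.5, 2.5, 3.5, 2.0])
# i.e. a trailing two-point average whose last element is the convolution tail.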
def plot(gbl_seaice, column, suptitle, light_style=False, infotext='bottom',
filebase='global_sea_ice', ymin=14, ymax=30, pdf=True, png=True,
legend_loc='lower right'):
"""
    Plot the selected column in either the light or dark style and write PDF and/or PNG outputs.
"""
# Set up foreground background colors based on light_style
if light_style:
FG_COLOR = [0, 0, 0]
BG_COLOR = [1, 1, 1]
else:
FG_COLOR = [1, 1, 1]
BG_COLOR = [0, 0, 0]
now = dt.datetime.now()
cur_mon = str(now.month - 1)
cur_day = str(now.day)
cur_year = str(now.year)
iso_date = dt.date.today().isoformat()
# Get all pre-2010 data grouped by day of year
pre_x = gbl_seaice[(gbl_seaice['Year'] < PRE_YEAR) &
(gbl_seaice['Year'] >= 1978)].groupby(['Day of Year'])
# Calculate average extent for each day of the year in the pre-2010 data
mean = pre_x[column].mean()
sigma = pre_x[column].std()
# Get all the data grouped by each year for plotting
year_groups = gbl_seaice.groupby(['Year'])
# Make plot
plt.rc('savefig', facecolor=BG_COLOR)
plt.rc('axes', edgecolor=FG_COLOR)
plt.rc('xtick', color=FG_COLOR)
plt.rc('ytick', color=FG_COLOR)
plt.rc('axes', labelcolor=FG_COLOR)
plt.rc('axes', facecolor=BG_COLOR)
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Avant Garde']})
# Create figure to plot
fig = plt.figure()
ax = plt.subplot(111)
# Add some extra space at the bottom of the plot
plt.subplots_adjust(bottom=0.14)
ax.tick_params('both', length=7.5, width=2, which='major')
# Adjust spines
spines = ['left', 'bottom']
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 6))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
gridlines = ax.get_xgridlines() + ax.get_ygridlines()
# Set grid line style
for line in gridlines:
line.set_linestyle('dotted')
line.set_linewidth(0.3)
line.set_color(FG_COLOR)
line.set_alpha(0.2)
line.set_zorder(-5)
minday = np.min(gbl_seaice['Day of Year'])
maxday = np.max(gbl_seaice['Day of Year'])
doy = np.arange(minday, maxday+1)
# import pdb
# pdb.set_trace()
pl = []
def plot_sigma(plt, doy, sigma, n, color=FG_COLOR, alpha=1, lw=0):
"""
        Plot a shaded band of n standard deviations around the pre-PRE_YEAR mean.
"""
label = u'{0}$\sigma$ (pre-{1})'.format(n, PRE_YEAR)
return plt.fill_between(doy, mean+sigma*n, mean-sigma*n,
facecolor=color, alpha=alpha, linewidth=lw,
label=label, zorder=-10)
def mod_color(rgb_color, multiplier=0.15, light_style=light_style):
"""
        Take an RGB color (list of three floats) and scale its saturation (light
        style) or value (dark style) by the multiplier.
"""
hsv_color = c.rgb_to_hsv(rgb_color)
logging.debug('Input color value: {0}'.format(hsv_color[2]))
if light_style:
hsv_color[1] = hsv_color[1] * multiplier
else:
hsv_color[2] = hsv_color[2] * multiplier
logging.debug('Output color value: {0}'.format(hsv_color[2]))
return c.hsv_to_rgb(hsv_color)
pl.append(plot_sigma(plt, doy, sigma, 5,
color=mod_color([1, 0, 0.2], 0.225)))
pl.append(plot_sigma(plt, doy, sigma, 4,
color=mod_color([1, 0, 0.2], 0.125)))
pl.append(plot_sigma(plt, doy, sigma, 3,
color=mod_color([1, 0.7, 0])))
pl.append(plot_sigma(plt, doy, sigma, 2,
color=mod_color([0, 1, 0])))
pl.append(plot_sigma(plt, doy, sigma, 1,
color=BG_COLOR))
# Line width for mean and recent years
lw = 2.5
# Plot mean (with outline of BG_COLOR)
plt.plot(doy, mean, color=BG_COLOR, linewidth=lw+1.5,
zorder=2, linestyle='-')[0]
pl.append(plt.plot(doy, mean, color=FG_COLOR, linewidth=lw,
label='Mean (pre-{0})'.format(PRE_YEAR),
zorder=2, linestyle='-')[0])
# Get count of years of data
year_count = gbl_seaice['Year'].max() - gbl_seaice['Year'].min()
# Number of manually configured years
manual_years = 2
# Set colormap value depending on light/dark
if light_style:
color_map_end = 0.1
else:
color_map_end = 0.35
# Use colormap for each year (avoiding the darkest 35% of magma colors)
color = iter(plt.cm.magma(np.linspace(1, color_map_end,
year_count - manual_years)))
# Plot every year's specific data, some with manually set formatting
for key, grp in year_groups:
if (key >= 1979 and key <= 2018) or key >= 2016:
if key == 2017:
pl.append(plt.plot(grp[column], c='#ff00bb',
zorder=3, linewidth=lw, label=key)[0])
elif key == 2016:
pl.append(plt.plot(grp[column], c='#bb00ff',
zorder=3, linewidth=lw, label=key)[0])
# elif key == 2015:
# pl.append(plt.plot(grp['Total Extent'], c='red',
# zorder=2, linewidth=lw, label=key)[0])
# elif key == 2014:
# pl.append(plt.plot(grp['Total Extent'], c='orange',
# zorder=2, linewidth=lw, label=key)[0])
# elif key == 2013:
# pl.append(plt.plot(grp['Total Extent'], c='yellow',
# zorder=2, linewidth=lw, label=key)[0])
# elif key == 2012:
# pl.append(plt.plot(grp['Total Extent'], c='#00ff00',
# zorder=2, linewidth=lw, label=key)[0])
else:
# Plot all non-manually configured years
plt.plot(grp[column], c=next(color),
zorder=1, linewidth=0.7, alpha=0.5)
# Adjust legend and axes and plot them
le = plt.legend(shadow=False, fontsize=9, loc=legend_loc, fancybox=True,
ncol=2, handles=pl)
for text in le.get_texts():
text.set_color(FG_COLOR)
# Move ylabel a bit to the left for pretty, then plot the label
ax.yaxis.labelpad = 11
plt.ylabel(r'\textbf{Sea Ice Extent [$\times$10$^{6}$ sq. km, '
'%d day rolling avg.]}' % (ROLLING_WINDOW),
fontsize=13)
# Setup x-ticks and plot them
xlabels = calendar.month_abbr[1:13]
xlocations = list(map(lambda x: x+15, np.linspace(1, 366, 13)))
plt.xticks(np.linspace(1, 366, 13))
plt.setp(ax.get_xmajorticklabels(), visible=False)
ax.xaxis.set_minor_locator(ticker.FixedLocator(xlocations))
ax.xaxis.set_minor_formatter(ticker.FixedFormatter(xlabels))
plt.xlim([1, 366])
# Setup y-ticks and plot them
plt.yticks(np.arange(ymin, ymax + 2, 2),
map(str, np.arange(ymin, ymax + 2, 2)), fontsize=13)
plt.ylim([ymin, ymax])
# Adjust ytick label position
for tick in ax.yaxis.get_major_ticks():
tick.set_pad(4)
# Adjust xtick label position
for tick in ax.xaxis.get_minor_ticks():
tick.set_pad(0)
# Add all the misc info text to the figure
def it_annotate(text, infotext, it_offset_num):
it_xpos = 0
it_ypos = 0
it_offset_xbase = 1
it_offset_ybase = 1.25
it_offset_ysep = 7
it_pos_unit = 'axes fraction'
it_textcoords = 'offset points'
if infotext == 'top':
it_ypos = 1
it_offset_ybase = it_offset_ybase * -1 - 5.25
it_offset_ysep = it_offset_ysep * -1
it_xy = (it_xpos, it_ypos)
xytext = (it_offset_xbase, it_offset_ybase +
it_offset_num * it_offset_ysep)
ax.annotate(text,
fontsize=7.0, color=FG_COLOR, xy=it_xy, xytext=xytext,
xycoords=it_pos_unit, textcoords=it_textcoords, ha='left')
it_annotate(r'\textbf{DATA:} NSIDC Sea Ice Index, Version 2 (G02135)',
infotext=infotext, it_offset_num=2)
it_annotate(r'\textbf{CSV:} '
'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/',
infotext=infotext, it_offset_num=1)
it_annotate(r'\textbf{GRAPHIC:} Emma M (GitHub: @emmatoday)',
infotext=infotext, it_offset_num=0)
ax.annotate(r'(For non-bold years, more recent years are more purple)',
fontsize=9, color=FG_COLOR, backgroundcolor=BG_COLOR,
xy=(0.5, 0.065),
xycoords='figure fraction', ha='center')
ax.annotate(r'Updated %s' % (iso_date),
fontsize=9, color=FG_COLOR, backgroundcolor=BG_COLOR,
xy=(0.5, 0.03),
xycoords='figure fraction', ha='center')
fig.suptitle(suptitle, fontsize=24, color=FG_COLOR, y=0.965)
if light_style:
output_filebase = '{0}_{1}_light'.format(filebase, iso_date)
else:
output_filebase = '{0}_{1}_dark'.format(filebase, iso_date)
if pdf:
pdf_filename = output_filebase + '.pdf'
plt.savefig(os.path.join(OUTPUT_PATH, pdf_filename), dpi=900)
print('\n' 'PDF Figure ({0}) plotted!'.format(pdf_filename))
if png:
png_filename = output_filebase + '.png'
plt.savefig(os.path.join(OUTPUT_PATH, png_filename), dpi=900)
print('\n' 'PNG Figure ({0}) plotted!'.format(png_filename))
def main():
"""
Main function that is called when script is executed from the CLI.
"""
now = dt.datetime.now()
cur_year = str(now.year)
# Prepare and load the data files downloading and updating as necessary
sea_ice_indexes = prep_data_files()
sea_ice_indexes['n'] = sea_ice_indexes['n'][['N Extent', 'Date']]
gbl_seaice = pd.merge(left=sea_ice_indexes['s'],
right=sea_ice_indexes['n'],
on='Date')
gbl_seaice.drop(['Missing', 'Source Data'], axis=1, inplace=True)
# Interpolate to fill in missing data in older years
gbl_seaice.interpolate(inplace=True)
# Add N and S to get global total ice extent
gbl_seaice['Total Extent'] = \
gbl_seaice['S Extent'] + gbl_seaice['N Extent']
# Set the index of the data to be the day of year
gbl_seaice.index = gbl_seaice['Day of Year']
# Set the index type to a a datetime
gbl_seaice.index = gbl_seaice.index.astype(dt.datetime)
n_seaice = gbl_seaice.drop(['Day', 'Date',
'Month', 'Total Extent', 'S Extent'],
axis=1)
s_seaice = gbl_seaice.drop(['Day', 'Date',
'Month', 'Total Extent', 'N Extent'],
axis=1)
# Drop columns we don't need anymore
gbl_seaice.drop(['Day', 'Date',
'Month', 'S Extent', 'N Extent'],
axis=1, inplace=True)
# Set titles
gt = r'\textbf{NSIDC Global Sea Ice Extent (1979-%s)}' % cur_year
nt = r'\textbf{NSIDC Arctic Sea Ice Extent (1979-%s)}' % cur_year
st = r'\textbf{NSIDC Antarctic Sea Ice Extent (1979-%s)}' % cur_year
# Set column names
gc = 'Total Extent'
nc = 'N Extent'
sc = 'S Extent'
# Set filename bases
gf = 'global_sea_ice'
nf = 'arctic_sea_ice'
sf = 'antarctic_sea_ice'
# Plot both the dark and light versions of the graphs
plot(gbl_seaice, column=gc, suptitle=gt, filebase=gf, light_style=True,
png=True, infotext='top')
plot(gbl_seaice, column=gc, suptitle=gt, filebase=gf, light_style=False,
png=True, infotext='top')
plot(n_seaice, column=nc, suptitle=nt, filebase=nf, light_style=True,
png=True, ymin=0, ymax=18, legend_loc='upper right')
plot(n_seaice, column=nc, suptitle=nt, filebase=nf, light_style=False,
png=True, ymin=0, ymax=18, legend_loc='upper right')
plot(s_seaice, column=sc, suptitle=st, filebase=sf, light_style=True,
png=True, ymin=0, ymax=22, infotext='top')
plot(s_seaice, column=sc, suptitle=st, filebase=sf, light_style=False,
png=True, ymin=0, ymax=22, infotext='top')
# Gets CLI arguments and sets up logging to stderr
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-v', '--verbose', action="count", dest="verbose",
default=2,
help="Increase the verbosity. "
"Use twice for extra effect")
parser.add_argument('-q', '--quiet', action="count", dest="quiet",
default=0,
help="Decrease the verbosity. "
"Use twice for extra effect")
args = parser.parse_args()
# Set up clean logging to stderr
log_levels = [logging.CRITICAL, logging.ERROR, logging.WARNING,
logging.INFO, logging.DEBUG]
args.verbose = min(args.verbose - args.quiet, len(log_levels) - 1)
args.verbose = max(args.verbose, 0)
logging.basicConfig(level=log_levels[args.verbose],
format='%(levelname)s: %(message)s')
# Call main function
main()
| mit |
dennissergeev/classcode | lib/cloudsat/cloudsat_tool.py | 1 | 14059 | """ modified 2012/11/24 to add cast for LayerTop
modified 2014/11/07
temporally remove "__main__"
add I/O for radar file
add I/O for lidar file
combine readrain.py
combine read_ecmwf.py
modified 2014/11/08 rename as cloudsat_tool
add monotonic option in get_geo
fix bugs
"""
import datetime
import dateutil.tz as tz
import numpy as np
import h5py
## used only in __main__
# import matplotlib.pyplot as plt
Tc=273.15
Pa2hPa=1.e2
m2km=1.e3
def convert_field(void_field):
"""
convert a numpy array of tuples
into a regular numpy array of the same
shape and dtype
"""
save_shape=void_field.shape
flat_test=void_field.flat
out_flat=np.empty(len(flat_test),dtype=flat_test[0][0].dtype)
for index,item in enumerate(flat_test):
out_flat[index]=item[0]
out=out_flat.reshape(save_shape)
return out
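# Hedged sketch (illustration only, not used elsewhere in this module): HDF5
# "void"/record arrays such as the one built here come back from convert_field
# as plain float arrays of the same shape.
def _demo_convert_field():
    void = np.array([(1.0,), (2.0,)], dtype=[('f0', '<f8')])
    return convert_field(void)  # -> array([ 1.,  2.])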
def get_geo(hdfname, monotonic_id=1):
"""given the name of any hdf file from the Cloudsat data archive
return lat,lon,time_vals,prof_times,dem_elevation
for the cloudsat orbital swath
    usage: lat,lon,time_vals,prof_times,dem_elevation=get_geo(hdfname)
parameters:
input:
hdfname: string with name of hdf file from http://www.cloudsat.cira.colostate.edu/dataSpecs.php
monotonic_id: make the longitude monotonic (=1) or not (~=1)
output:
        lat -- profile latitude in degrees north (1-D vector)
        lon -- profile longitude in degrees east (1-D vector)
time_vals -- profile times in UTC (1D vector)
prof_times -- profile times in seconds since beginning of orbit (1D vector)
dem_elevation -- surface elevation in meters
"""
with h5py.File(hdfname,'r') as f:
root_name=f.keys()[0]
variable_names=['Longitude','Latitude','Profile_time','DEM_elevation']
var_dict={}
for var_name in variable_names:
var_dict[var_name]=convert_field(f[root_name]['Geolocation Fields'][var_name][...])
tai_start=f[root_name]['Geolocation Fields']['TAI_start'][0][0]
#
# <-------- Added on 2014/11/08
#
# ===================================================================== #
if monotonic_id==1:
lon=var_dict['Longitude'][:];
for id in range(0, len(lon)-1):
if lon[id+1] > lon[id]:
lon[id+1] = lon[id+1]-360
lonmin=np.amin(lon)
#
# basemap requires lons in the range -360 - 720 degrees
#
if lonmin < -360.:
lon[:]=lon[:] + 360.
var_dict['Longitude']=lon
# ===================================================================== #
#
#tai_start is the number of seconds since Jan 1, 1993 that the orbit
#began
taiDelta=datetime.timedelta(seconds=tai_start)
taiDayOne=datetime.datetime(1993,1,1,tzinfo=tz.tzutc())
#this is the start time of the orbit in seconds since Jan 1, 1993
orbitStart=taiDayOne + taiDelta
time_vals=[]
    #now loop through the radar profile times and convert them to
#python datetime objects in utc
for the_time in var_dict['Profile_time']:
date_time=orbitStart + datetime.timedelta(seconds=float(the_time))
time_vals.append(date_time)
var_dict['date_day']=np.array(time_vals)
neg_values=var_dict['DEM_elevation'] < 0
var_dict['DEM_elevation'][neg_values]=0
#
# return a list with the five variables
#
variable_names=['Latitude','Longitude','date_day','Profile_time','DEM_elevation']
out_list=[var_dict[varname] for varname in variable_names]
return out_list
def read_radar(hdfname, maskid=1,minmax=None):
"""
======================================================================
I/O functions for CloudSat. 2B-GEOPROF radar file
----------------------------------------------------------------------
height, reflect = read_radar(hdfname)
----------------------------------------------------------------------
Input:
hdfname: filename
maskid: do not mask (=0), mask as np.nan (=1),
mask as np.mask class (=2) for bad values.
Output:
reflect: radar reflectance, dbZ
height: height, km
======================================================================
"""
with h5py.File(hdfname, 'r') as obj:
height=obj['2B-GEOPROF/Geolocation Fields/Height'].value.astype(np.float)
height=height/m2km
reflect=obj['2B-GEOPROF/Data Fields/Radar_Reflectivity'].value.astype(np.float)
ref_scale=obj['2B-GEOPROF/Data Fields/Radar_Reflectivity'].attrs['factor']
ref_offset=obj['2B-GEOPROF/Data Fields/Radar_Reflectivity'].attrs['offset']
reflect=(reflect-ref_offset)/ref_scale
if minmax is None:
minmax=[-5,20]
ref_id=np.logical_or(reflect < minmax[0], reflect > minmax[1])
if maskid==1:
reflect[ref_id]=np.nan
if maskid==2:
reflect=np.ma.masked_where(ref_id, reflect)
return height, reflect
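# Hedged note on the decoding above: Radar_Reflectivity is stored as scaled
# integers, so with (purely for illustration) factor=100 and offset=0 a stored
# value of -2500 decodes to -25 dBZ, which then falls outside the default
# minmax=[-5, 20] window and is masked.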
def read_lidar(hdfname, maskid=1):
"""
======================================================================
I/O functions for CloudSat. 2B-GEOPROF-LIDAR_GRANULE lidar file
----------------------------------------------------------------------
CFrac, LayerTop, LayerBase = read_lidar(hdfname)
----------------------------------------------------------------------
Input:
hdfname: filename
maskid: do not mask (=0), mask as np.nan (=1),
mask as np.mask class (=2) for bad values.
Output:
CFrac: cloud fraction, %
        LayerTop: lidar cloud top height, km
        LayerBase: lidar cloud base height, km
======================================================================
"""
with h5py.File(hdfname, 'r') as obj:
layerTop=obj['2B-GEOPROF-LIDAR/Data Fields/LayerTop'].value.astype(np.float)
layerBase=obj['2B-GEOPROF-LIDAR/Data Fields/LayerBase'].value.astype(np.float)
CFrac=obj['2B-GEOPROF-LIDAR/Data Fields/CloudFraction'].value.astype(np.float)
layerTop=layerTop/m2km
layerBase=layerBase/m2km
if maskid == 1:
layerTop[layerTop < 0]=np.nan
        layerBase[layerBase < 0]=np.nan
CFrac[CFrac < 0]=np.nan
if maskid == 2:
layerTop=np.ma.masked_where(layerTop == 0, layerTop)
layerBase=np.ma.masked_where(layerBase < 0, layerBase)
CFrac=np.ma.masked_where(CFrac < 0, CFrac)
return CFrac, layerTop, layerBase
def read_ecmwf(hdfname, maskid=1):
"""
======================================================================
I/O functions for CloudSat. ECMWF-AUX file
----------------------------------------------------------------------
P, SLP, T, T2m, SKT, q, O3 = read_ecmwf(hdfname)
----------------------------------------------------------------------
Input:
hdfname: filename
maskid: do not mask (=0), mask as np.nan (=1),
mask as np.mask class (=2) for bad values.
Output:
P: Pressure, hPa, 2-D array
        SLP: Surface pressure, hPa, 1-D array
T: Temperature, degC, 2-D array
T2m: Temperature on 2m above surface, degC, 1-D array
SKT: Surface Skin Temperature, degC, 1-D array
q: Specific Humidity, kg/kg, 2-D array
O3: Ozone mixing ratio, kg/kg, 2-D array
======================================================================
"""
with h5py.File(hdfname, 'r') as obj:
P=obj['ECMWF-AUX/Data Fields/Pressure']
SLP=obj['ECMWF-AUX/Data Fields/Surface_pressure']
T=obj['ECMWF-AUX/Data Fields/Temperature']
T2m=obj['ECMWF-AUX/Data Fields/Temperature_2m']
SKT=obj['ECMWF-AUX/Data Fields/Skin_temperature']
q=obj['ECMWF-AUX/Data Fields/Specific_humidity']
O3=obj['ECMWF-AUX/Data Fields/Ozone']
var_list=[P,SLP,T,T2m,SKT,q,O3]
missing_vals=[item.attrs['missing'] for item in var_list]
var_list=[item.value for item in var_list]
mask_plus_var=zip(missing_vals,var_list)
def nan_mask(mask_val,var):
var[var == mask_val] = np.nan
return var
def ma_mask(mask_val,var):
var=np.ma.masked_where(var == mask_val, var)
return var
if maskid == 1:
out_vars=[nan_mask(mask_val,var) for mask_val,var in mask_plus_var]
if maskid == 2:
out_vars=[ma_mask(mask_val,var) for mask_val,var in mask_plus_var]
var_list=[item.astype(np.float) for item in out_vars]
P,SLP,T,T2m,SKT,q,O3=var_list
P=P/Pa2hPa
SLP=SLP/Pa2hPa
T=T- Tc
T2m=T2m- Tc
SKT=SKT- Tc
return P,SLP,T,T2m,SKT,q,O3
def read_rain(hdfname, maskid=1):
"""
======================================================================
I/O functions for CloudSat. CS_2C-RAIN-PROFILE file
----------------------------------------------------------------------
    rain, precli, precice, clw = read_rain(hdfname)
----------------------------------------------------------------------
Input:
hdfname: filename
maskid: do not mask (=0), mask as np.nan (=1),
mask as np.mask class (=2) for bad values.
Output:
rain: Rain rate, mm/hr, 2-D array
precli: Liquid precipitation water content, g/m^3, 2-D array
precice: Ice precipitation water content, g/m^3, 2-D array
        clw: Cloud liquid water content, g/m^3, 2-D array
======================================================================
"""
with h5py.File(hdfname, 'r') as obj:
rainRAW=obj['2C-RAIN-PROFILE/Data Fields/rain_rate'].value.astype(np.float)
rain_factor=obj['2C-RAIN-PROFILE/Data Fields/rain_rate'].attrs['factor']
rain_missing=obj['2C-RAIN-PROFILE/Data Fields/rain_rate'].attrs['missing']
rain=rainRAW*rain_factor
precliRAW=obj['2C-RAIN-PROFILE/Data Fields/precip_liquid_water'].value.astype(np.float)
precli_factor=obj['2C-RAIN-PROFILE/Data Fields/precip_liquid_water'].attrs['factor']
precli_missing=obj['2C-RAIN-PROFILE/Data Fields/precip_liquid_water'].attrs['missing']
precli=precliRAW*precli_factor
preciceRAW=obj['2C-RAIN-PROFILE/Data Fields/precip_ice_water'].value.astype(np.float)
precice_factor=obj['2C-RAIN-PROFILE/Data Fields/precip_ice_water'].attrs['factor']
precice_missing=obj['2C-RAIN-PROFILE/Data Fields/precip_ice_water'].attrs['missing']
        precice=preciceRAW*precice_factor
clwRAW=obj['2C-RAIN-PROFILE/Data Fields/cloud_liquid_water'].value.astype(np.float)
clw_factor=obj['2C-RAIN-PROFILE/Data Fields/cloud_liquid_water'].attrs['factor']
clw_missing=obj['2C-RAIN-PROFILE/Data Fields/cloud_liquid_water'].attrs['missing']
clw=clwRAW*clw_factor
if maskid == 1:
rain[rainRAW == rain_missing]=np.nan
precli[precliRAW == precli_missing]=np.nan
precice[preciceRAW == precice_missing]=np.nan
clw[clwRAW == clw_missing]=np.nan
if maskid == 2:
rain=np.ma.masked_where(rainRAW == rain_missing, rain)
precli=np.ma.masked_where(precliRAW == precli_missing, precli)
precice=np.ma.masked_where(preciceRAW == precice_missing, precice)
clw=np.ma.masked_where(clwRAW == clw_missing, clw)
return rain, precli, precice, clw
#if __name__=="__main__":
#this flag makes sure the data file can't be overwritten
#radar reflectivity data see
#http://www.cloudsat.cira.colostate.edu/dataSpecs.php?prodid=9
# radar_file='2010247105814_23156_CS_2B-GEOPROF_GRANULE_P_R04_E03.h5'
# lat,lon,date_day,prof_seconds,dem_elevation=get_geo(radar_file)
# lidar_file='2010247105814_23156_CS_2B-GEOPROF-LIDAR_GRANULE_P2_R04_E03.h5'
## #
## # height values stored as an SD dataset
## #
# with h5py.File(radar_file,'r') as f:
# height=f['2B-GEOPROF']['Geolocation Fields']['Height'].value
# height=height.astype(np.float)
# refl_vals=f['2B-GEOPROF']['Data Fields']['Radar_Reflectivity'].value
# refl_vals=refl_vals.astype(np.float)
# refl_scale=(f['2B-GEOPROF']['Swath Attributes']['Radar_Reflectivity.factor'].value)[0][0]
# refl_vals=refl_vals/refl_scale
# with h5py.File(lidar_file,'r') as f:
# layerTop=f['2B-GEOPROF-LIDAR/Data Fields/LayerTop'].value
# layerTop=layerTop.astype(np.float)
# layerTop[layerTop < 0]=np.nan
# plt.close('all')
# fig1=plt.figure(1)
# fig1.clf()
# axis1=fig1.add_subplot(1,1,1)
# start=21000
# stop=22000
# start=5000
# stop=6000
#
# subset the array
#
# part_refl=refl_vals[start:stop,:]
#
# mask out the uninteresting reflectivities
#
# hit=np.logical_or(part_refl < -5.,part_refl > 20)
# refl_masked=np.ma.masked_where(part_refl,hit)
#
# convert height to km
#
# im=axis1.pcolormesh(prof_seconds[start:stop],height[0,:]/1.e3,refl_masked.T)
# axis1.set_xlabel('time after orbit start (seconds)')
# axis1.set_ylabel('height (km)')
# start,stop=[item.strftime('%Y-%m-%d %H:%M:%S') for item in (date_day[start],date_day[stop])]
# axis1.set_title('{} to {}'.format(start,stop))
# axis1.set_ylim([0,10])
# cb=fig1.colorbar(im)
# cb.set_label('reflectivity (dbZ)')
# fig1.savefig('reflectivity.png')
# fig2=plt.figure(2)
# axis2=fig2.add_subplot(1,1,1)
# axis2.plot(prof_seconds,layerTop[:,0]/1.e3,'b')
# axis2.plot(prof_seconds,dem_elevation/1.e3,'r')
# axis2.set_xlabel('time after orbit start (seconds)')
# axis2.set_ylabel('height (km)')
# axis2.set_title('lidar cloud top (blue) and dem surface elevation (red)')
# fig2.savefig('lidar_height.png')
# plt.show()
| cc0-1.0 |
spallavolu/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/frame/methods/test_reindex_like.py | 8 | 1187 | import numpy as np
import pytest
from pandas import DataFrame
import pandas._testing as tm
class TestDataFrameReindexLike:
def test_reindex_like(self, float_frame):
other = float_frame.reindex(index=float_frame.index[:10], columns=["C", "B"])
tm.assert_frame_equal(other, float_frame.reindex_like(other))
@pytest.mark.parametrize(
"method,expected_values",
[
("nearest", [0, 1, 1, 2]),
("pad", [np.nan, 0, 1, 1]),
("backfill", [0, 1, 2, 2]),
],
)
def test_reindex_like_methods(self, method, expected_values):
df = DataFrame({"x": list(range(5))})
result = df.reindex_like(df, method=method, tolerance=0)
tm.assert_frame_equal(df, result)
result = df.reindex_like(df, method=method, tolerance=[0, 0, 0, 0])
tm.assert_frame_equal(df, result)
def test_reindex_like_subclass(self):
# https://github.com/pandas-dev/pandas/issues/31925
class MyDataFrame(DataFrame):
pass
expected = DataFrame()
df = MyDataFrame()
result = df.reindex_like(expected)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
chrisjsewell/PyGauss | pygauss/molecule.py | 1 | 78698 | # -*- coding: utf-8 -*-
"""
Created on Fri May 01 21:24:31 2015
@author: chris
"""
import os
from io import BytesIO
import PIL
from PIL import Image, ImageChops
import copy
import warnings
from math import degrees, atan2, sqrt, acos
import numpy as np
from scipy.signal import argrelextrema
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import ColorConverter
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
from IPython.display import Image as ipy_Image
from .chemlab_patch.io.handlers._cclib import Handler
from chemlab.graphics.qtviewer import QtViewer
#have to add method to instances of chemlab.graphics.camera.Camera
#from chemlab.graphics.transformations import rotation_matrix
from .transformations import rotation_matrix
def orbit_z(self, angle):
# Subtract pivot point
self.position -= self.pivot
# Rotate
rot = rotation_matrix(-angle, self.c)[:3,:3]
self.position = np.dot(rot, self.position)
# Add again the pivot point
self.position += self.pivot
self.a = np.dot(rot, self.a)
self.b = np.dot(rot, self.b)
self.c = np.dot(rot, self.c)
from chemlab.graphics.camera import Camera
Camera.orbit_z = orbit_z
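# Note: the block above monkey-patches chemlab's Camera class with an orbit_z
# method (a rotation of the camera position and basis vectors about its own c
# axis, pivoting on Camera.pivot); rotation_matrix comes from the local
# .transformations module imported above.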
from .chemlab_patch.graphics.renderers.atom import AtomRenderer
from .chemlab_patch.graphics.renderers.ballandstick import BallAndStickRenderer
from .chemlab_patch.graphics.renderers.line import LineRenderer
from .chemlab_patch.graphics.renderers.triangles import TriangleRenderer
from chemlab.graphics.renderers.wireframe import WireframeRenderer
#from chemlab.graphics.postprocessing import SSAOEffect # Screen Space Ambient Occlusion
from chemlab.utils import cartesian
from cclib.parser.utils import convertor
from chemlab.graphics.colors import get as str_to_colour
from chemlab.qc import molecular_orbital
#improvement to function
#TODO not making this a dependency until it works
from chemlab.qc.pgbf import pgbf
try:
import numexpr as ne
def __call__(self,x,y,z):
"Compute the amplitude of the PGBF at point x,y,z"
I,J,K = self.powers
dx,dy,dz = x-self.origin[0],y-self.origin[1],z-self.origin[2]
n = self.norm
e = self.exponent
return ne.evaluate(
'n*(dx**I)*(dy**J)*(dz**K) * exp(-e*(dx*dx + dy*dy + dz*dz))')
pgbf.__call__ = __call__
except ImportError:
pass
#instead of chemview MolecularViewer to add defined colouring
#also ignore; 'FutureWarning: IPython widgets are experimental and may change in the future.'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from .chemview_patch.viewer import MolecularViewer
from .utils import circumcenter
from .file_io import Folder
from .isosurface import get_isosurface
class Molecule(object):
def __init__(self, folderpath='',
init_fname=False, opt_fname=False,
freq_fname=False, nbo_fname=False,
pes_fname=False,
fail_silently=False,
atom_groups={}, alignto=[],
server=None, username=None, passwrd=None,
folder_obj=None):
"""a class to analyse gaussian input/output of a single molecular geometry
Parameters
----------
folderpath : str
the folder path
init_fname : str
            the initial geometry (.com) file
opt_fname : str or list of str
the optimisation log file
freq_fname : str
the frequency analysis log file
nbo_fname : str
the population analysis logfile
pes_fname : str
the potential energy scan logfile
fail_silently : bool
whether to raise an error if a file read fails (if True can use get_init_read_errors to see errors)
atom_groups: {str:[int, ...]}
groups of atoms that can be selected as a subset
alignto: [int, int, int]
the atom numbers to align the geometry to
Notes
-----
        any of the file names can have wildcards (e.g. 'filename*.log') in them,
as long as this resolves to a single path in the directory
NB: nbo population analysis must be run with the GFInput flag to ensure
data is output to the log file
"""
if folder_obj:
self._folder = folder_obj
else:
self._folder = Folder(folderpath,
server, username, passwrd)
self._init_data = None
self._prev_opt_data = []
self._opt_data = None
self._freq_data = None
self._nbo_data = None
self._pes_data = []
self._alignment_atom_indxs = ()
self._t_matrix = None
if alignto:
self.set_alignment_atoms(*alignto)
self._atom_groups = atom_groups
parts=[['init', init_fname, self.add_initialgeom],
['opt', opt_fname, self.add_optimisation],
['freq', freq_fname, self.add_frequency],
['nbo', nbo_fname, self.add_nbo_analysis],
['pes', pes_fname, self.add_pes_analysis]]
self._init_read_errors = []
for typ, fname, method in parts:
if fname:
if fail_silently:
try:
method(fname)
except Exception, e:
self._init_read_errors.append([typ, fname, str(e)])
else:
method(fname)
def get_folder(self):
""" return the Folder instance """
return self._folder
def get_atom_group(self, group):
"""return list of atoms in group """
if group is None:
return group
if type(group) is str:
if not self._atom_groups.has_key(group):
raise ValueError('the molecule does not have an; {0}, atom group'.format(group))
return self._atom_groups[group]
atoms = []
for i in group:
if type(i) is str:
if not self._atom_groups.has_key(i):
raise ValueError('the molecule does not have an; {0}, atom group'.format(i))
atoms.extend(self._atom_groups[i])
elif type(i) is int:
atoms.append(i)
else:
raise ValueError('atom must be an integer')
return list(set(atoms))
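    # Hedged usage sketch: with atom_groups={'ring': [1, 2, 3]} passed to the
    # constructor, get_atom_group(['ring', 7]) would return the combined,
    # de-duplicated atom list [1, 2, 3, 7] (ordering follows set iteration).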
def get_init_read_errors(self):
""" get read errors, recorded if fail_silently was set to True on initialise """
return self._init_read_errors[:]
def __repr__(self):
return '<PyGauss Molecule>'
def __deepcopy__(self, memo):
if not self._folder.islocal():
warnings.warn('Cannot deepcopy non-local folder object (reverting to user home path)')
self._folder = Folder(os.path.expanduser('~'))
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def _get_data(self, file_name, ftype='gaussian'):
if not self._folder.active():
with self._folder as folder:
with folder.read_file(file_name) as fd:
data = Handler(fd, ftype)
else:
with self._folder.read_file(file_name) as fd:
data = Handler(fd, ftype)
return data
def add_initialgeom(self, file_name):
self._init_data = self._get_data(file_name, ftype='gausscom')
def add_optimisation(self, file_name):
if type(file_name) is list or type(file_name) is tuple:
self._opt_data = self._get_data(file_name[-1])
self._prev_opt_data = []
for f in file_name[:-1]:
self._prev_opt_data.append(self._get_data(f))
else:
self._opt_data = self._get_data(file_name)
def add_frequency(self, file_name):
self._freq_data = self._get_data(file_name)
def add_nbo_analysis(self, file_name):
self._nbo_data = self._get_data(file_name)
def add_pes_analysis(self, file_names):
if type(file_names) is str:
file_names = [file_names]
self._pes_data = [self._get_data(fname) for fname in file_names]
def _read_data(self, ftype, dtype):
""" read data """
if not getattr(self, ftype):
raise ValueError(
'{0} has not been set for this molecule'.format(ftype))
return getattr(self, ftype).read(dtype)
def get_basis_descript(self):
return self._read_data('_opt_data', 'basis_descript')
def get_basis_funcs(self):
return self._read_data('_opt_data', 'nbasis')
def get_run_error(self, rtype='opt'):
"""True if there were errors in the computation, else False """
return getattr(self, '_{0}_data'.format(rtype)).read('run_error')
def is_optimised(self):
""" was the geometry optimised """
return self._read_data('_opt_data', 'optdone')
def get_opt_energy(self, units='eV', final=True, zpe_correct=False):
""" return the SCF optimisation energy(s)
Parameters
----------
units : str
the unit type of the energy
final : bool
return only the final optimised energy if True, else for all steps
zpe_correct : bool
apply zero-point energy correction (found in frequency log) to final optimised energy
Returns
-------
energy : float or list of floats
dependent on final
"""
if not self._opt_data:
return np.nan
energies = self._read_data('_opt_data', 'scfenergies')
if energies.shape[0] == 0:
return np.nan if final else energies
if not units == 'eV':
energies = convertor(energies, 'eV', units)
if zpe_correct:
energies[-1] += self.get_zeropt_energy(units=units)
return energies[-1] if final else energies
def get_zeropt_energy(self, units='eV'):
""" return the zero-point energy correction
Parameters
----------
units : str
the unit type of the energy
Returns
-------
energy : float
zero-point energy correction
"""
if not self._freq_data:
return np.nan
energy = self._read_data('_freq_data', 'zeropt_energy')
if not units == 'eV':
energy = convertor(energy, 'eV', units)
return energy
def plot_opt_energy(self, units='eV', linecolor='blue', ax=None):
""" plot SCF optimisation energy
Returns
-------
data : matplotlib.axes.Axes
plotted optimisation data
"""
energies = self._read_data('_opt_data', 'scfenergies')
ylabel = 'Energy ({0})'.format(units)
xlabel = 'Optimisation Step'
for data in reversed(self._prev_opt_data):
energies = np.concatenate([data.read('scfenergies'), energies])
if not units == 'eV':
energies = convertor(energies, 'eV', units)
if not ax:
f, ax = plt.subplots()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True)
ax.plot(energies, color=linecolor)
return ax
def is_conformer(self, cutoff=0.):
"""False if any frequencies in the frequency analysis were negative"""
imgaginary_freqs = self._read_data('_freq_data', 'vibfreqs') < cutoff
return not imgaginary_freqs.any()
def get_freq_analysis(self):
"""return frequency analysis
Returns
-------
data : pandas.DataFrame
frequency data
"""
frequencies = self._read_data('_freq_data', 'vibfreqs')
irs = self._read_data('_freq_data', 'vibirs')
return pd.DataFrame(zip(frequencies, irs),
columns=['Frequency ($cm^{-1}$)',
'IR Intensity ($km/mol$)'])
def plot_freq_analysis(self, color='blue', alpha=1, marker_size=20, ax=None):
"""plot frequency analysis
Returns
-------
data : matplotlib.axes.Axes
plotted frequency data
"""
df = self.get_freq_analysis()
if not ax:
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel('Frequency ($cm^{-1}$)')
ax.set_ylabel('IR Intensity ($km/mol$)')
ax.bar(df['Frequency ($cm^{-1}$)'], df['IR Intensity ($km/mol$)'],
align='center', width=30, linewidth=0, alpha=alpha,color=color)
ax.scatter(df['Frequency ($cm^{-1}$)'], df['IR Intensity ($km/mol$)'] ,
marker='o',alpha=alpha,color=color, s=marker_size)
ax.set_ybound(-10)
return ax
def set_alignment_atoms(self, idx1, idx2, idx3):
assert type(idx1) is int and type(idx2) is int and type(idx3) is int
self._alignment_atom_indxs = (idx1, idx2, idx3)
def remove_alignment_atoms(self):
self._alignment_atom_indxs = ()
def _midpoint_coordinates(self, coord_list):
return np.mean(np.array(coord_list), axis=0)
def _midpoint_atoms(self, molecule, atom_ids):
return np.mean(molecule.r_array[atom_ids], axis=0)
def _create_transform_matrix(self, c1, c2, c3):
"""
A function that takes three coordinates and creates a transformation matrix
that aligns their plane with the standard axes:
their centre point (circumcenter) will be at (0, 0, 0)
c1 will be aligned to the x-axis
the normal to the plane will be aligned to the z-axis
"""
# find midpoint of coords
c0 = circumcenter([c1, c2, c3])
#c0 = self._midpoint_coordinates([c1, c2, c3])
#translate c0 to the origin [0,0,0] and pick two vectors
v1=c1-c0; v2=c2-c0; v3=c3-c0
#now find the orthonormal basis set
# a plane is a*x+b*y+c*z+d=0 where[a,b,c] is the normal and d is 0
# (since the origin now intercepts the plane). Thus, we calculate;
normal = np.cross(v2,v3)
#a, b, c = normal
vf3 = normal/np.linalg.norm(normal)
vf1 = v1/np.linalg.norm(v1)
vf2 = np.cross(vf3, vf1)
vf2 = vf2/np.linalg.norm(vf2)
#create the translation matrix that moves the new basis to the origin
ident=np.vstack((np.identity(3), np.zeros(3)))
translate_matrix = np.hstack((ident, np.array(np.append(-c0, 1))[np.newaxis].T))
#create the rotation matrix that rotates the new basis onto the standard basis
rotation_matrix = np.hstack((np.array([vf1, vf2, vf3, np.zeros(3)]),
np.array(np.append([0, 0, 0], 1))[np.newaxis].T))
# translate before rotating
transform_matrix = np.dot(rotation_matrix, translate_matrix)
return transform_matrix
def _apply_transfom_matrix(self, transform_matrix, coords):
"""apply transform matrix calculated in _create_transform_matrix """
if transform_matrix is None:
return coords
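# convert each coordinate to homogeneous form (append 1), apply the 4x4 matrix, then drop the final component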
t_coords = [np.dot(transform_matrix,
np.array(np.append(coord, 1))[np.newaxis].T)[:-1].flatten()
for coord in coords]
return np.array(t_coords)
def _create_molecule(self, optimised=True, opt_step=False, scan_step=False,
gbonds=True, data=None, alignment_atoms=None):
"""create molecule """
if not optimised:
molecule = self._read_data('_init_data', 'molecule')
else:
indata = data if data else self._opt_data
if not type(opt_step) is bool:
molecule = indata.read('molecule', step=opt_step)
elif not type(scan_step) is bool:
molecule = indata.read('molecule', scan=scan_step)
else:
molecule = indata.read('molecule')
if gbonds: molecule.guess_bonds()
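# optionally align the molecule so that three chosen atoms (1-based indices) define the reference plane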
t_matrix = None
if alignment_atoms:
a, b, c = alignment_atoms
a0, b0, c0 = molecule.r_array[[a-1,b-1,c-1]]
t_matrix = self._create_transform_matrix(a0,b0,c0)
elif self._alignment_atom_indxs:
a, b, c = self._alignment_atom_indxs
a0, b0, c0 = molecule.r_array[[a-1,b-1,c-1]]
t_matrix = self._create_transform_matrix(a0,b0,c0)
molecule.r_array = self._apply_transfom_matrix(t_matrix, molecule.r_array)
self._t_matrix = t_matrix
return molecule
#instead of from chemlab.notebook import display_molecule to add ball_stick
def _view_molecule(self, molecule, represent='vdw', colorlist=[]):
"""active representationion of molecule using chemview
"""
allowed_rep = ['none', 'wire', 'vdw', 'ball_stick']
if represent not in allowed_rep:
raise ValueError(
'unknown molecule representation: {0}, must be in {1}'.format(
represent, allowed_rep))
topology = {
'atom_types': molecule.type_array,
'bonds': molecule.bonds
}
mv = MolecularViewer(molecule.r_array, topology)
if molecule.n_bonds != 0:
if represent=='ball_stick':
mv.ball_and_sticks(colorlist=colorlist)
elif represent=='wire':
mv.points(size=0.15, colorlist=colorlist)
mv.lines(colorlist=colorlist)
else:
raise NotImplementedError('none and vdw not implemented in active view')
else:
mv.points()
return mv
def _trim_image(self, im):
"""
a simple solution to trim whitespace from the image:
1. Gets the border colour from the top-left pixel, using getpixel,
so you don't need to pass the colour.
2. Subtracts a scalar from the differenced image;
this is a quick way of saturating all values under (100, 100, 100) to zero,
which neatly removes any 'wobble' resulting from compression.
"""
bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
diff = ImageChops.difference(im, bg)
diff = ImageChops.add(diff, diff, 2.0, -100)
bbox = diff.getbbox()
if bbox:
return im.crop(bbox)
def _image_molecule(self, molecule, represent='vdw', background='white',
colorlist=[], transparent=False,
rotation=[0., 0., 0.], width=300, height=300, zoom=1.,
lines=[], linestyle='impostors',
surfaces=[]):
"""create image of molecule
Parameters
----------
molecule : chemlab.core.molecule.Molecule
the molecule to image
represent : str
representation of molecule ('none', 'wire', 'vdw' or 'ball_stick')
background : matplotlib.colors
background color
colorlist : list
color override for each of the atoms (if empty colored by atom type)
transparent : bool
whether atoms should be transparent (based on alpha value)
zoom : float
zoom level of images
width : int
width of original images
height : int
height of original images (although width takes precedence)
lines : list
lines to add to the image in the form;
[start_coord, end_coord, start_color, end_color, width, dashed]
surfaces : list
surfaces to add to the image in the format;
[vertices, normals, colors, transparent, wireframe]
Returns
-------
image : PIL.Image
an image of the system
"""
allowed_rep = ['none', 'wire', 'vdw', 'ball_stick']
if represent not in allowed_rep:
raise ValueError(
'unknown molecule representation: {0}, must be in {1}'.format(
represent, allowed_rep))
v = QtViewer()
w = v.widget
w.background_color = tuple([int(i*255) for i in ColorConverter().to_rgba(background)])
w.initializeGL()
if represent=='ball_stick':
r = v.add_renderer(BallAndStickRenderer,
molecule.r_array,
molecule.type_array,
molecule.bonds,
rgba_array=colorlist,
linestyle=linestyle,
transparent=transparent)
elif represent=='vdw':
r = v.add_renderer(AtomRenderer,
molecule.r_array,
molecule.type_array,
rgba_array=colorlist,
transparent=transparent)
elif represent=='wire':
r = v.add_renderer(WireframeRenderer,
molecule.r_array,
molecule.type_array,
molecule.bonds)
elif represent=='none':
r = None
for line in lines:
#line = [start_coord, end_coord, start_color, end_color, width, dashed]
#for some reason it didn't like unpacking them to named variables
v.add_renderer(LineRenderer, [line[0], line[1]],
[[str_to_colour(line[2]), str_to_colour(line[3])]],
width=line[4], dashed=line[5])
for surface in surfaces:
vertices, normals, colors, transparent, wireframe = surface
v.add_renderer(TriangleRenderer, vertices, normals, colors,
transparent=transparent,
wireframe=wireframe)
#v.add_post_processing(SSAOEffect)
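# frame the molecule with the camera (scaled by zoom) and apply the requested x, y, z rotations (in degrees)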
w.camera.autozoom(molecule.r_array*1./zoom)
w.camera.orbit_x(rotation[0]*np.pi/180.)
w.camera.orbit_y(rotation[1]*np.pi/180.)
w.camera.orbit_z(rotation[2]*np.pi/180.)
image = w.toimage(width, height)
# Cleanup
v.clear()
del v
del w
del r
return self._trim_image(image)
def _concat_images_horizontal(self, images, gap=10, background='white'):
""" concatentate one or more PIL images horizontally
Parameters
----------
images : PIL.Image list
the images to concatenate
gap : int
the pixel gap between images
background : PIL.ImageColor
background color (as supported by PIL.ImageColor)
"""
if len(images) == 1: return images[0]
total_width = sum([img.size[0] for img in images]) + len(images)*gap
max_height = max([img.size[1] for img in images])
final_img = PIL.Image.new("RGBA", (total_width, max_height), color=background)
horizontal_position = 0
for img in images:
final_img.paste(img, (horizontal_position, 0))
horizontal_position += img.size[0] + gap
return final_img
def _concat_images_vertical(self, images, gap=10):
""" concatentate one or more PIL images vertically
Parameters
----------
images : PIL.Image list
the images to concatenate
gap : int
the pixel gap between images
"""
if len(images) == 1: return images[0]
max_width = max([img.size[0] for img in images])
total_height = sum([img.size[1] for img in images]) + len(images)*gap
final_img = PIL.Image.new("RGBA", (max_width, total_height), color='white')
vertical_position = 0
for img in images:
final_img.paste(img, (0, vertical_position))
vertical_position += img.size[1] + gap
return final_img
def _color_to_transparent(self, image, color=(255, 255, 255)):
""" sets alpha to 0 for specific colour in PIL image
Parameters
----------
image : PIL.Image
the images to process
color : (int, int, int)
the RGB (0 to 255) color to set alpha to 0
"""
datas = image.getdata()
newData = []
for item in datas:
if item[0] == color[0] and item[1] == color[1] and item[2] == color[2]:
newData.append((color[0], color[1], color[2], 0))
else:
newData.append(item)
image.putdata(newData)
return image
def _show_molecule(self, molecule, active=False,
represent='vdw', rotations=[[0., 0., 0.]],
background='white', colorlist=[], transparent=False,
axis_length=0, lines=[], linestyle='impostors',
surfaces=[],
zoom=1., width=300, height=300, ipyimg=True):
"""show the molecule
Parameters
----------
molecule : chemlab.core.molecule.Molecule
the molecule to image
active : bool
whether the molecule representation should be interactive
(ipython notebook only)
represent : str
representation of molecule ('none', 'wire', 'vdw' or 'ball_stick')
background : matplotlib.colors
background color
colorlist : list
color override for each of the atoms (if empty colored by atom type)
transparent : bool
whether atoms should be transparent (based on alpha value)
axis_length : float
if non-zero lines will be drawn along each axis to +/- axis_length
lines : list
lines to add to the image in the form;
[start_coord, end_coord, start_color, end_color, width, dashed]
surfaces : list
surfaces to add to the image in the format;
[vertices, normals, colors, transparent, wireframe]
zoom : float
zoom level of images
width : int
width of original images
height : int
height of original images (although width takes precedence)
ipyimg : bool
whether to return an IPython image, PIL image otherwise
Returns
-------
mol : IPython.display.Image or PIL.Image
an image of the molecule in the format specified by ipyimg
or an active representation
"""
if active:
return self._view_molecule(molecule, represent=represent,
colorlist=colorlist)
else:
drawlines=lines[:]
if axis_length:
if type(axis_length) is list or type(axis_length) is tuple:
neg_length, pos_length = axis_length
else:
neg_length = pos_length = axis_length
drawlines.append([(-1*neg_length,0,0), (pos_length,0,0),
'red', 'dark_red', 3, True])
drawlines.append([(0,-1*neg_length,0), (0,pos_length,0),
'light_green', 'dark_green', 3, True])
drawlines.append([(0,0,-1*neg_length), (0,0,pos_length),
'light_blue', 'dark_blue', 3, True])
images = []
for rotation in rotations:
images.append(self._image_molecule(molecule,
represent=represent,
background=background, colorlist=colorlist,
rotation=rotation, zoom=zoom,
width=width, height=width,
lines=drawlines, linestyle=linestyle,
transparent=transparent, surfaces=surfaces))
image = self._concat_images_horizontal(images, background=background)
del images
if ipyimg:
b = BytesIO()
image.save(b, format='png')
return ipy_Image(data=b.getvalue())
else:
return image
def show_initial(self, gbonds=True, active=False, represent='vdw',
rotations=[[0., 0., 0.]], zoom=1., width=300, height=300,
axis_length=0, lines=[], background='white', ipyimg=True):
"""show initial geometry (before optimisation) of molecule coloured by atom type """
molecule = self._create_molecule(optimised=False, gbonds=gbonds)
return self._show_molecule(molecule, active=active, background=background,
represent=represent,
rotations=rotations, zoom=zoom,
lines=lines, axis_length=axis_length, ipyimg=ipyimg)
def show_optimisation(self, opt_step=False, gbonds=True, active=False,
represent='vdw', rotations=[[0., 0., 0.]], zoom=1.,
width=300, height=300, axis_length=0, lines=[],
background='white', ipyimg=True):
"""show optimised geometry of molecule coloured by atom type """
molecule = self._create_molecule(optimised=True, opt_step=opt_step,
gbonds=gbonds)
return self._show_molecule(molecule, active=active, background=background,
represent=represent,
rotations=rotations, zoom=zoom,
lines=lines, axis_length=axis_length,
width=width, height=height, ipyimg=ipyimg)
def _rgb_to_hex(self, rgb):
"""convert RGB color to hex format"""
return int('0x%02x%02x%02x' % rgb[:3], 16)
def _get_highlight_colors(self, natoms, atomlists, active=False, alpha=0.7):
norm = mpl.colors.Normalize(vmin=1, vmax=len(atomlists))
cmap = cm.jet_r
m = cm.ScalarMappable(norm=norm, cmap=cmap)
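# default to light grey; atoms belonging to a group take their colour from the jet_r colormap (one colour per group)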
colorlist = [(211, 211, 211, int(255*alpha)) for n in range(natoms)]
for n in range(natoms):
for group, atomlist in enumerate(atomlists):
if n+1 in atomlist:
colorlist[n] = m.to_rgba(group+1, bytes=True)
break
if active:
colorlist = [self._rgb_to_hex(col) for col in colorlist]
return colorlist
def show_highlight_atoms(self, atomlists, transparent=False, alpha=0.7,
gbonds=True, active=False, optimised=True, background='white',
represent='vdw', rotations=[[0., 0., 0.]], zoom=1.,
width=300, height=300, axis_length=0, lines=[], ipyimg=True):
"""show optimised geometry of molecule with certain atoms highlighted """
if optimised:
natoms = self._read_data('_opt_data', 'natom')
else:
natoms = self._read_data('_init_data', 'natom')
atomlists=[self.get_atom_group(grp) for grp in atomlists]
colorlist = self._get_highlight_colors(natoms, atomlists, active,
alpha=alpha)
molecule = self._create_molecule(optimised=optimised, gbonds=gbonds)
if transparent:
linestyle='lines'
else:
linestyle='impostors'
return self._show_molecule(molecule, active=active,
transparent=transparent,
represent=represent, background=background,
rotations=rotations, zoom=zoom,
colorlist=colorlist, linestyle=linestyle,
lines=lines, axis_length=axis_length,
width=width, height=height, ipyimg=ipyimg)
def _write_init_file(self, molecule, file_name, descript='',
overwrite=False, decimals=8,
charge=0, multiplicity=1,
folder_obj=None):
""" write a template gaussian input file to folder
"""
if not type(charge) is int or not type(multiplicity) is int:
raise ValueError('charge and multiplicity of molecule must be defined')
if not folder_obj:
folder_obj = self._folder
with folder_obj as folder:
with folder.write_file(file_name+'_init.com', overwrite) as f:
f.write('%chk={0}_init.chk \n'.format(file_name))
f.write('# opt b3lyp/3-21g \n')
f.write('\n')
f.write('{0} \n'.format(descript))
f.write('\n')
f.write('{0} {1} \n'.format(charge, multiplicity))
for t, c in zip(molecule.type_array, molecule.r_array*10.): # nanometers to angstrom
x, y, z = c.round(decimals)
f.write(' {0}\t{1}\t{2}\t{3} \n'.format(t, x, y, z))
f.write('\n')
return True
def _array_transformation(self, array, rotations, transpose=[0,0,0]):
""" 3D rotation around x-axis, then y-axis, then z-axis,
then transposition """
if rotations == [0,0,0]:
new = array
else:
x, y, z = rotations
rot_x = rotation_matrix(x*np.pi/180., [1, 0, 0])[:3,:3]
rot_y = rotation_matrix(y*np.pi/180., [0, 1, 0])[:3,:3]
rot_z = rotation_matrix(z*np.pi/180., [0, 0, 1])[:3,:3]
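# combine the rotations: x first, then y, then z (the rightmost matrix is applied first)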
rot = np.dot(rot_z, np.dot(rot_y, rot_x))
new = np.array([np.dot(rot, coord) for coord in array])
new[:,0] += transpose[0]
new[:,1] += transpose[1]
new[:,2] += transpose[2]
return new
def combine_molecules(self, other_mol, self_atoms=False, other_atoms=False,
self_rotation=[0,0,0], other_rotation=[0,0,0],
self_transpose=[0,0,0], other_transpose=[0,0,0],
self_opt=True, other_opt=True,
charge=None, multiplicity=None,
out_name=False, descript='', overwrite=False,
active=False, background='white',
represent='ball_stick', rotations=[[0., 0., 0.]], zoom=1.,
width=300, height=300, axis_length=0, ipyimg=True,
folder_obj=None):
""" transpose in nanometers """
mol = self._create_molecule(optimised=self_opt)
if self_atoms:
self_atoms = self.get_atom_group(self_atoms)
self_indxs = np.array(self_atoms) - 1
mol.r_array = mol.r_array[self_indxs]
mol.type_array = mol.type_array[self_indxs]
mol.r_array = self._array_transformation(mol.r_array,
self_rotation, self_transpose)
mol_atoms = [i+1 for i in range(len(mol.type_array))]
other = other_mol._create_molecule(optimised=other_opt)
if other_atoms:
other_atoms = other_mol.get_atom_group(other_atoms)
other_indxs = np.array(other_atoms) - 1
other.r_array = other.r_array[other_indxs]
other.type_array = other.type_array[other_indxs]
other.r_array = self._array_transformation(other.r_array,
other_rotation, other_transpose)
other_atoms = [i+1+len(mol.type_array) for i in range(len(other.type_array))]
mol.r_array = np.concatenate([mol.r_array, other.r_array])
mol.type_array = np.concatenate([mol.type_array, other.type_array])
mol.guess_bonds()
if out_name:
self._write_init_file(mol, out_name, descript, overwrite,
charge=charge, multiplicity=multiplicity,
folder_obj=folder_obj)
colorlist = self._get_highlight_colors(len(mol.type_array),
[mol_atoms, other_atoms], active)
return self._show_molecule(mol, active=active,
represent=represent,
rotations=rotations, zoom=zoom,
background=background, colorlist=colorlist,
axis_length=axis_length,
width=width, height=height, ipyimg=ipyimg,
)
def _get_charge_colors(self, relative=False, minval=-1, maxval=1, alpha=None):
charges = self._read_data('_nbo_data', 'atomcharges')['natural']
if relative: minval, maxval = (min(charges), max(charges))
norm = mpl.colors.Normalize(vmin=minval, vmax=maxval)
cmap = cm.bwr
m = cm.ScalarMappable(norm=norm, cmap=cmap)
colors=m.to_rgba(charges, alpha=alpha, bytes=True)
return colors
def show_nbo_charges(self, gbonds=True, active=False, background='white',
relative=False, minval=-1, maxval=1,
represent='vdw', rotations=[[0., 0., 0.]], zoom=1.,
width=300, height=300, axis_length=0, lines=[], ipyimg=True):
""" show optimised geometry coloured by charge from nbo analysis """
colorlist = self._get_charge_colors(relative, minval, maxval)
molecule = self._create_molecule(optimised=True, gbonds=gbonds)
return self._show_molecule(molecule, active=active,
represent=represent,
rotations=rotations, zoom=zoom,
background=background, colorlist=colorlist,
lines=lines, axis_length=axis_length,
width=width, height=height, ipyimg=ipyimg)
def get_orbital_count(self):
"""return number of orbitals """
moenergies = self._read_data('_nbo_data', "moenergies")[0]
return int(moenergies.shape[0])
def _find_nearest_above(self, my_array, target):
diff = my_array - target
mask = np.ma.less_equal(diff, 0)
# We need to mask the negative differences and zero
# since we are looking for values above
if np.all(mask):
return None # returns None if target is greater than any value
masked_diff = np.ma.masked_array(diff, mask)
return masked_diff.argmin()
def _find_nearest_below(self, my_array, target):
diff = my_array - target
mask = np.ma.greater_equal(diff, 0)
# We need to mask the positive differences and zero
# since we are looking for values below
if np.all(mask):
return None # returns None if target is lower than any value
masked_diff = np.ma.masked_array(diff, mask)
return masked_diff.argmax()
def get_orbital_homo_lumo(self):
"""return orbital numbers of homo and lumo """
homo = self._read_data('_nbo_data', 'homos')[-1]+1
lumo = homo + 1
#moenergies = self._read_data('_nbo_data', "moenergies")[0]
#homo = self._find_nearest_below(moenergies, 0.) + 1
#lumo = self._find_nearest_above(moenergies, 0.) + 1
return homo, lumo
def get_orbital_energies(self, orbitals, eunits='eV'):
"""the orbital energies for listed orbitals
Parameters
----------
orbitals : int or iterable of ints
the orbital(s) to return energies for (starting at 1)
eunits : str
the units of energy
Returns
-------
moenergies : numpy.array
energy for each orbital
"""
orbitals = np.array(orbitals, ndmin=1, dtype=int)
assert np.all(orbitals>0) and np.all(orbitals<=self.get_orbital_count()), (
'orbitals must be in range 1 to number of orbitals')
moenergies = self._read_data('_nbo_data', "moenergies")[0]
if not eunits=='eV':
moenergies = convertor(moenergies, 'eV', eunits)
return moenergies[orbitals-1]
def yield_orbital_images(self, orbitals, iso_value=0.02, extents=(2,2,2),
transparent=True, alpha=0.5, wireframe=True, background='white',
bond_color=(255, 0, 0), antibond_color=(0, 255, 0),
resolution=100, gbonds=True, represent='ball_stick',
rotations=[[0., 0., 0.]], zoom=1.,
width=300, height=300, axis_length=0, lines=[], ipyimg=True):
"""yield orbital images
Parameters
----------
orbitals : int or list of ints
the orbitals to show (in range 1 to number of orbitals)
iso_value : float
The value for which the function should be constant.
extents : (float, float, float)
+/- x,y,z to extend the molecule geometry when constructing the surface
transparent : bool
whether iso-surface should be transparent (based on alpha value)
alpha :
alpha value of iso-surface
wireframe :
whether iso-surface should be wireframe (or solid)
background : matplotlib.colors
background color
bond_color :
color of bonding orbital surface in RGB format
antibond_color :
color of anti-bonding orbital surface in RGB format
resolution : int
The number of grid points to use for the surface. A high value will
give better quality but lower performance.
gbonds : bool
guess bonds between atoms (via distance)
represent : str
representation of molecule ('none', 'wire', 'vdw' or 'ball_stick')
zoom : float
zoom level of images
width : int
width of original images
height : int
height of original images (although width takes precedence)
axis_length : float
length of x,y,z axes in negative and positive directions
lines : [start_coord, end_coord, start_color, end_color, width, dashed]
lines to add to image
ipyimg : bool
whether to return an IPython image, PIL image otherwise
Returns
-------
mol : IPython.display.Image or PIL.Image
an image of the molecule in the format specified by ipyimg
"""
warnings.warn('Orbitals are currently an experimental feature')
orbitals = np.array(orbitals, ndmin=1, dtype=int)
assert np.all(orbitals>0) and np.all(orbitals<=self.get_orbital_count()), (
'orbitals must be in range 1 to number of orbitals')
r, g, b = bond_color
bond_rgba = (r, g, b, int(255*alpha))
r, g, b = antibond_color
antibond_rgba = (r, g, b, int(255*alpha))
#To fix issue with rotations
#TODO could probably do this better (no self._t_matrix)
alignto = self._alignment_atom_indxs[:]
self._alignment_atom_indxs = None
r_array = self._create_molecule(optimised=True).r_array
self._alignment_atom_indxs = alignto
molecule = self._create_molecule(optimised=True, gbonds=gbonds)
mocoeffs = self._read_data('_nbo_data', "mocoeffs")
gbasis = self._read_data('_nbo_data', "gbasis")
for orbital in orbitals:
coefficients = mocoeffs[0][orbital-1]
f = molecular_orbital(r_array.astype('float32'),
coefficients.astype('float32'),
gbasis)
surfaces = []
b_iso = get_isosurface(r_array, f, iso_value, bond_rgba,
resolution=resolution)
if b_iso:
verts, normals, colors = b_iso
verts = self._apply_transfom_matrix(self._t_matrix, verts)
normals = self._apply_transfom_matrix(self._t_matrix, normals)
surfaces.append([verts, normals, colors, transparent, wireframe])
a_iso = get_isosurface(molecule.r_array, f, -iso_value, antibond_rgba,
resolution=resolution)
if a_iso:
averts, anormals, acolors = a_iso
averts = self._apply_transfom_matrix(self._t_matrix, averts)
anormals = self._apply_transfom_matrix(self._t_matrix, anormals)
surfaces.append([averts, anormals, acolors, transparent,wireframe])
yield self._show_molecule(molecule,
represent=represent, background=background,
rotations=rotations, zoom=zoom,
surfaces=surfaces, transparent=False,
lines=lines, axis_length=axis_length,
width=width, height=height, ipyimg=ipyimg)
def show_active_orbital(self, orbital, iso_value=0.03, alpha=0.5,
bond_color=(255, 0, 0), antibond_color=(0, 255, 0),
gbonds=True):
"""get interactive representation of orbital
Parameters
----------
orbital : int
the orbital to show (in range 1 to number of orbitals)
iso_value : float
The value for which the function should be constant.
alpha :
alpha value of iso-surface
bond_color :
color of bonding orbital surface in RGB format
antibond_color :
color of anti-bonding orbital surface in RGB format
gbonds : bool
guess bonds between atoms (via distance)
"""
orbital = np.array(orbital, ndmin=1, dtype=int)
assert np.all(orbital>0) and np.all(orbital<=self.get_orbital_count()), (
'orbital must be in range 1 to number of orbitals')
orbital = orbital[0]
r, g, b = bond_color
bond_rgba = (r, g, b, int(255*alpha))
r, g, b = antibond_color
antibond_rgba = (r, g, b, int(255*alpha))
molecule = self._create_molecule(optimised=True, gbonds=gbonds)
mocoeffs = self._read_data('_nbo_data', "mocoeffs")
gbasis = self._read_data('_nbo_data', "gbasis")
coefficients = mocoeffs[0][orbital-1]
f = molecular_orbital(molecule.r_array.astype('float32'),
coefficients.astype('float32'),
gbasis)
mv = MolecularViewer(molecule.r_array, { 'atom_types': molecule.type_array,
'bonds': molecule.bonds })
mv.wireframe()
mv.add_isosurface(f, isolevel=iso_value, color=self._rgb_to_hex(bond_rgba))
mv.add_isosurface(f, isolevel=-iso_value, color=self._rgb_to_hex(antibond_rgba))
return mv
def _converter(self, val, unit1, unit2):
multiple = {('nm', 'nm') : 1.,
('nm', 'Angstrom') : 10.}
return val * multiple[(unit1, unit2)]
def calc_min_dist(self, idx_list1, idx_list2, optimisation=True, units='nm',
ignore_missing=True):
""" indexes start at 1 """
if optimisation:
molecule = self._read_data('_opt_data', 'molecule')
else:
molecule = self._read_data('_init_data', 'molecule')
idx_list1 = self.get_atom_group(idx_list1)
idx_list2 = self.get_atom_group(idx_list2)
# remove atoms not in molecule
if ignore_missing:
idx_list1 = [idx for idx in idx_list1[:] if idx <= molecule.n_atoms]
idx_list2 = [idx for idx in idx_list2[:] if idx <= molecule.n_atoms]
if not idx_list1 or not idx_list2:
return np.nan
indx_combis = cartesian([idx_list1, idx_list2])
c1 = molecule.r_array[indx_combis[:, 0]-1]
c2 = molecule.r_array[indx_combis[:, 1]-1]
dist = np.min(np.linalg.norm(c1-c2, axis=1))
return self._converter(dist, 'nm', units)
def calc_bond_angle(self, indxs, optimisation=True, mol=None):
""" Returns the angle in degrees between three points """
if mol:
molecule = mol
elif optimisation:
molecule = self._read_data('_opt_data', 'molecule')
else:
molecule = self._read_data('_init_data', 'molecule')
v1 = molecule.r_array[indxs[0]-1] - molecule.r_array[indxs[1]-1]
v2 = molecule.r_array[indxs[2]-1] - molecule.r_array[indxs[1]-1]
cosang = np.dot(v1, v2)
sinang = np.linalg.norm(np.cross(v1, v2))
return np.degrees(np.arctan2(sinang, cosang))
def calc_dihedral_angle(self, indxs, optimisation=True, mol=None):
""" Returns the angle in degrees between four points """
if mol:
molecule = mol
elif optimisation:
molecule = self._read_data('_opt_data', 'molecule')
else:
molecule = self._read_data('_init_data', 'molecule')
p = np.array([molecule.r_array[indxs[0]-1], molecule.r_array[indxs[1]-1],
molecule.r_array[indxs[2]-1], molecule.r_array[indxs[3]-1]])
b = p[:-1] - p[1:]
b[0] *= -1
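# b holds the three bond vectors; v projects the outer bonds onto the plane perpendicular to the central bond b[1]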
v = np.array( [ v - (v.dot(b[1])/b[1].dot(b[1])) * b[1] for v in [b[0], b[2]] ] )
# Normalize vectors
v /= np.sqrt(np.einsum('...i,...i', v, v)).reshape(-1,1)
b1 = b[1] / np.linalg.norm(b[1])
x = np.dot(v[0], v[1])
m = np.cross(v[0], b1)
y = np.dot(m, v[1])
angle = np.degrees(np.arctan2( y, x ))
return angle #np.mod(angle, 360)
def calc_polar_coords_from_plane(self, p1, p2, p3, c, optimisation=True,
units='nm'):
""" returns the distance r and angles theta, phi of atom c
to the circumcenter of the plane formed by [p1, p2, p3]
the plane formed will have;
x-axis along p1,
y-axis anticlock-wise towards p2,
z-axis normal to the plane
theta (azimuth) is the in-plane angle from the x-axis towards the y-axis
phi (inclination) is the out-of-plane angle from the x-axis towards
the z-axis
"""
alignto = self._alignment_atom_indxs[:]
self._alignment_atom_indxs = (p1, p2, p3)
if optimisation:
molecule = self._create_molecule(optimised=True)
else:
molecule = self._create_molecule(optimised=False)
if len(molecule.r_array)<c:
self._alignment_atom_indxs = alignto
return np.nan, np.nan, np.nan
x, y, z = molecule.r_array[c-1]
r = self._converter(sqrt(x*x+y*y+z*z), 'nm', units)
theta = degrees(atan2(y, x))
phi = degrees(atan2(z, x))
self._alignment_atom_indxs = alignto
return r, theta, phi
def calc_2plane_angle(self, p1, p2, optimisation=True):
"""return angle of planes """
a1, a2, a3 = self.get_atom_group(p1)
b1, b2, b3 = self.get_atom_group(p2)
if optimisation:
molecule = self._read_data('_opt_data', 'molecule')
else:
molecule = self._read_data('_init_data', 'molecule')
v1a = molecule.r_array[a2-1] - molecule.r_array[a1-1]
v2a = molecule.r_array[a3-1] - molecule.r_array[a1-1]
v1b = molecule.r_array[b2-1] - molecule.r_array[b1-1]
v2b = molecule.r_array[b3-1] - molecule.r_array[b1-1]
vnormala = np.cross(v1a,v2a)
vnormalb = np.cross(v1b,v2b)
cos_theta = np.dot(vnormala, vnormalb)/(
np.linalg.norm(vnormala)*np.linalg.norm(vnormalb))
#account for rounding errors
if cos_theta > 1.: cos_theta = 1.
if cos_theta < -1.: cos_theta = -1.
return degrees(acos(cos_theta))
def calc_opt_trajectory(self, atom, plane=[]):
""" calculate the trajectory of an atom as it is optimised,
relative to a plane of three atoms """
alignto = self._alignment_atom_indxs[:]
self._alignment_atom_indxs = plane
#get coord from init
mol = self._create_molecule(optimised=False)
init = mol.r_array[atom-1]
#get coords from opt
opts=[]
for data in self._prev_opt_data + [self._opt_data]:
run = []
for n in range(len(data.read('atomcoords'))):
mol = self._create_molecule(data=data, opt_step=n)
run.append(mol.r_array[atom-1])
opts.append(np.array(run))
self._alignment_atom_indxs = alignto
return init, opts
_SUFFIXES = {1: 'st', 2: 'nd', 3: 'rd'}
def _ordinal(self, num):
# I'm checking for 10-20 because those are the digits that
# don't follow the normal counting scheme.
if 10 <= num % 100 <= 20:
suffix = 'th'
else:
# the second parameter is a default.
suffix = self._SUFFIXES.get(num % 10, 'th')
return str(num) + suffix
def plot_opt_trajectory(self, atom, plane=[], ax_lims=None, ax_labels=False):
"""plot the trajectory of an atom as it is optimised,
relative to a plane of three atoms """
init, opts = self.calc_opt_trajectory(atom, plane)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(init[0], init[1], init[2], c='r',
s=30, label='Initial Position')
ax.scatter(opts[-1][-1,0], opts[-1][-1,1], opts[-1][-1,2], c=['g'],
s=30, label='Optimised Position')
for i, opt in enumerate(opts):
ax.plot(opt[:,0], opt[:,1], opt[:,2],
label='{0} optimisation'.format(self._ordinal(i+1)))
mol = self._create_molecule().r_array
a,b,c=plane
ax.scatter(*mol[a-1], c='k', marker='^', s=30, label='Atom {0}'.format(a))
ax.scatter(*mol[b-1], c='k', marker='o', s=30, label='Atom {0}'.format(b))
ax.scatter(*mol[c-1], c='k', marker='s', s=30, label='Atom {0}'.format(c))
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
if ax_lims:
x, y, z = ax_lims
ax.set_xlim3d(-x, x)
ax.set_ylim3d(-y, y)
ax.set_zlim3d(-z, z)
if ax_labels:
ax.set_xlabel('x (nm)')
ax.set_ylabel('y (nm)')
ax.set_zlabel('z (nm)')
return ax
def calc_nbo_charge(self, atoms=[]):
""" returns total charge of the atoms """
charges = self._read_data('_nbo_data', 'atomcharges')['natural']
if atoms==[]:
return np.sum(charges)
atoms = self.get_atom_group(atoms)
atoms = np.array(atoms) -1 # 1->0 base
try:
subcharges = charges[atoms]
except IndexError:
return np.nan
return np.sum(subcharges)
def calc_nbo_charge_center(self, p1, p2, p3, positive=True, units='nm',
atoms=[]):
""" returns the distance r amd angles theta, phi of the positive/negative
charge center to the circumcenter of the plane formed by [p1, p2, p3]
the plane formed will have;
x-axis along p1,
y-axis anticlock-wise towards p2,
z-axis normal to the plane
theta (azimuth) is the in-plane angle from the x-axis towards the y-axis
phi (inclination) is the out-of-plane angle from the x-axis towards
the z-axis
"""
molecule = self._create_molecule(alignment_atoms=(p1, p2, p3))
charges = self._read_data('_nbo_data', 'atomcharges')['natural']
coords = molecule.r_array
atoms = self.get_atom_group(atoms)
if atoms:
atoms = np.array(atoms) -1 # 1->0 base
charges = charges[atoms]
coords = coords[atoms]
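# charge-weighted average position, using only the positive (or only the negative) partial charges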
if positive:
weighted_coords = charges[charges>0] * coords[charges>0].T
else:
weighted_coords = -1*charges[charges<0] * coords[charges<0].T
charge_center = np.mean(weighted_coords.T, axis=0)
x, y, z = charge_center
r = self._converter(sqrt(x*x+y*y+z*z), 'nm', units)
theta = degrees(atan2(y, x))
phi = degrees(atan2(z, x))
return r, theta, phi
def get_sopt_analysis(self, eunits='kJmol-1', atom_groups=[],
charge_info=False):
"""interactions between "filled" (donor) Lewis-type
Natural Bonding Orbitals (NBOs) and "empty" (acceptor) non-Lewis NBOs,
using Second Order Perturbation Theory (SOPT)
Parameters
----------
eunits : str
the units of energy to return
atom_groups : [list or str, list or str]
restrict interactions to between two lists (or identifiers) of atom indexes
charge_info : bool
include charge info for atoms
(under headings 'A_Charges' and 'D_Charges')
Returns
-------
analysis : pandas.DataFrame
a table of interactions
"""
#sopt = copy.deepcopy(self._read_data('_nbo_data', 'sopt'))
sopt = self._read_data('_nbo_data', 'sopt')
df = pd.DataFrame(sopt,
columns=['Dtype', 'Donors', 'Atype', 'Acceptors', 'E2'])
if atom_groups:
group1, group2 = atom_groups
group1 = self.get_atom_group(group1)
group2 = self.get_atom_group(group2)
match_rows=[]
for indx, rw in df.iterrows():
if set(group1).issuperset(rw.Acceptors) and set(group2).issuperset(rw.Donors):
match_rows.append(rw)
elif set(group2).issuperset(rw.Acceptors) and set(group1).issuperset(rw.Donors):
match_rows.append(rw)
df = pd.DataFrame(match_rows)
if not eunits=='kcal':
df.E2 = convertor(df.E2, 'kcal', eunits)
typ = self._read_data('_nbo_data', 'molecule').type_array
df['D_Symbols'] = df.Donors.apply(lambda x: [typ[i-1] for i in x])
df['A_Symbols'] = df.Acceptors.apply(lambda x: [typ[i-1] for i in x])
if charge_info:
chrg= self._read_data('_nbo_data', 'atomcharges')['natural']
df['D_Charges'] = df.Donors.apply(lambda x: [chrg[i-1] for i in x])
df['A_Charges'] = df.Acceptors.apply(lambda x: [chrg[i-1] for i in x])
return df[['Dtype', 'Donors', 'D_Symbols', 'D_Charges',
'Atype', 'Acceptors', 'A_Symbols', 'A_Charges',
'E2']]
else:
return df[['Dtype', 'Donors', 'D_Symbols',
'Atype', 'Acceptors', 'A_Symbols',
'E2']]
def get_hbond_analysis(self, min_energy=0., atom_groups=[], eunits='kJmol-1'):
"""EXPERIMENTAL! hydrogen bond analysis (DH---A),
using Second Order Perturbation Theory
Parameters
----------
min_energy : float
the minimum interaction energy to report
eunits : str
the units of energy to return
atom_groups : [list or str, list or str]
restrict interactions to between two lists (or identifiers) of atom indexes
Returns
-------
analysis : pandas.DataFrame
a table of interactions
Notes
-----
uses a strict definition of a hydrogen bond as:
interactions between "filled" (donor) Lewis-type Lone Pair (LP) NBOs
and "empty" (acceptor) non-Lewis Bonding (BD) NBOs
"""
df = self.get_sopt_analysis(atom_groups=atom_groups, eunits=eunits)
df = df[df.E2 >= min_energy]
df = df[df.A_Symbols.apply(lambda x: 'H' in x) &
df.Dtype.str.contains('LP') &
df.Atype.str.contains('BD*')]
return df
def calc_sopt_energy(self, atom_groups=[], eunits='kJmol-1', no_hbonds=False):
"""calculate total energy of interactions between "filled" (donor) Lewis-type
Natural Bonding Orbitals (NBOs) and "empty" (acceptor) non-Lewis NBOs,
using Second Order Perturbation Theory
Parameters
----------
eunits : str
the units of energy to return
atom_groups : [list or str, list or str]
restrict interactions to between two lists (or identifiers) of atom indexes
no_hbonds : bool
whether to ignore H-Bonds in the calculation
Returns
-------
analysis : pandas.DataFrame
a table of interactions
"""
df = self.get_sopt_analysis(atom_groups=atom_groups, eunits=eunits)
if no_hbonds:
dfh = df[df.A_Symbols.apply(lambda x: 'H' in x) &
df.Dtype.str.contains('LP') &
df.Atype.str.contains('BD*')]
df = df.loc[set(df.index).difference(dfh.index)]
return df.E2.sum()
def show_sopt_bonds(self, min_energy=20., cutoff_energy=0., atom_groups=[],
bondwidth=5, eunits='kJmol-1', no_hbonds=False,
gbonds=True, active=False,
represent='ball_stick', rotations=[[0., 0., 0.]], zoom=1.,
width=300, height=300, axis_length=0, lines=[],
relative=False, minval=-1, maxval=1,
alpha=0.5, transparent=True, background='white',
ipyimg=True):
"""visualisation of interactions between "filled" (donor) Lewis-type
Natural Bonding Orbitals (NBOs) and "empty" (acceptor) non-Lewis NBOs,
using Second Order Perturbation Theory
"""
df = self.get_sopt_analysis(atom_groups=atom_groups, eunits=eunits)
df = df[df.E2 >= min_energy]
if no_hbonds:
dfh = self.get_hbond_analysis(min_energy=min_energy, eunits=eunits,
atom_groups=atom_groups)
df = df.loc[set(df.index).difference(dfh.index)]
molecule = self._create_molecule(gbonds=gbonds)
drawlines = lines[:]
for i, rw in df.iterrows():
d_coord = np.mean([molecule.r_array[d-1] for d in rw.Donors], axis=0)
a_coord = np.mean([molecule.r_array[a-1] for a in rw.Acceptors], axis=0)
dashed = rw.E2 < cutoff_energy
drawlines.append([d_coord, a_coord, 'blue', 'red',
max([1, bondwidth-1]), dashed])
colorlist = self._get_charge_colors(relative, minval, maxval, alpha=alpha)
return self._show_molecule(molecule, active=active,
represent=represent,
rotations=rotations, zoom=zoom,
colorlist=colorlist,
lines=drawlines, axis_length=axis_length,
width=width, height=height, linestyle='lines',
transparent=transparent, background=background,
ipyimg=ipyimg)
def calc_hbond_energy(self, atom_groups=[], eunits='kJmol-1'):
df = self.get_hbond_analysis(atom_groups=atom_groups, eunits=eunits)
return df.E2.sum()
def show_hbond_analysis(self, min_energy=0., atom_groups=[],
cutoff_energy=0., eunits='kJmol-1', bondwidth=5,
gbonds=True, active=False,
represent='ball_stick', rotations=[[0., 0., 0.]], zoom=1.,
width=300, height=300, axis_length=0, lines=[],
relative=False, minval=-1, maxval=1,
alpha=0.5, transparent=True, background='white',ipyimg=True):
"""EXPERIMENTAL! hydrogen bond analysis DH---A
For a hydrogen bond to occur there must be both a hydrogen donor and an
acceptor present. The donor in a hydrogen bond is the atom to which the
hydrogen atom participating in the hydrogen bond is covalently bonded,
and is usually a strongly electronegative atom such as N, O, or F. The
hydrogen acceptor is the neighboring electronegative ion or molecule,
and must possess a lone electron pair in order to form a hydrogen bond.
Since the hydrogen donor is strongly electronegative, it pulls the
covalently bonded electron pair closer to its nucleus, and away from
the hydrogen atom. The hydrogen atom is then left with a partial
positive charge, creating a dipole-dipole attraction between the
hydrogen atom bonded to the donor, and the lone electron pair on the acceptor.
"""
df = self.get_hbond_analysis(min_energy=min_energy, eunits=eunits,
atom_groups=atom_groups)
molecule = self._create_molecule(gbonds=gbonds)
drawlines = lines[:]
for i, rw in df.iterrows():
d_coord = np.mean([molecule.r_array[d-1] for d in rw.Donors], axis=0)
h_indx = rw.A_Symbols.index('H')
a_coord = molecule.r_array[rw.Acceptors[h_indx]-1]
dashed = rw.E2 < cutoff_energy
drawlines.append([d_coord, a_coord, 'blue', 'red',
max([1, bondwidth-1]), dashed])
colorlist = self._get_charge_colors(relative, minval, maxval, alpha=alpha)
return self._show_molecule(molecule, active=active,
represent=represent, background=background,
rotations=rotations, zoom=zoom,
colorlist=colorlist,
lines=drawlines, axis_length=axis_length,
width=width, height=height, linestyle='lines',
transparent=transparent,
ipyimg=ipyimg)
def _get_dos(self, mol, atoms=[], dos_type='all', eunits='eV',
per_energy=1., lbound=None, ubound=None):
num_mo = mol.get_orbital_count()
if not lbound:
lbound = mol.get_orbital_energies(1, eunits=eunits)
if not ubound:
ubound = mol.get_orbital_energies(mol.get_orbital_count(), eunits=eunits)
#round down/up to nearest multiple of per_energy
lenergy_bound = lbound - (lbound % per_energy)
uenergy_bound = ubound + (per_energy - ubound % per_energy)
num_bins = int((uenergy_bound-lenergy_bound) / per_energy)
if atoms:
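# weight each orbital by the fraction of its NBO occupancy that sits on the selected atoms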
df_occupancy = pd.DataFrame(mol._read_data('_nbo_data', 'nbo_occupancy'),
columns=['NBO', 'Atom', 'Occ'])
sub_df = df_occupancy[df_occupancy.Atom.isin(atoms)].groupby('NBO').sum()
sub_df = sub_df.reindex(range(1, num_mo+1))
weights = sub_df.Occ.fillna(0)/100.
else:
weights=None
freq, e_edges = np.histogram(
mol.get_orbital_energies(np.arange(1, num_mo+1), eunits=eunits),
bins=num_bins, range=(lenergy_bound, uenergy_bound),
density=False, weights=weights)
#energy = bin_edges[:-1] + 0.5*(bin_edges[1:]-bin_edges[:-1])
df = pd.DataFrame(zip(e_edges[:-1], e_edges[1:], freq),
columns=['MinEnergy', 'MaxEnergy', 'Freq'])
homo, lumo = mol.get_orbital_homo_lumo()
if dos_type == 'all':
pass
elif dos_type == 'homo':
df = df[df.MinEnergy <= mol.get_orbital_energies(homo, eunits=eunits)[0]]
elif dos_type == 'lumo':
df = df[df.MaxEnergy >= mol.get_orbital_energies(lumo, eunits=eunits)[0]]
else:
raise ValueError('dos_type must be; all, homo or lumo')
return df
def _plot_single_dos(self, mol, atoms=[], dos_type='all',
eunits='eV', per_energy=1., lbound=None, ubound=None,
ax=None,
color='g', label='',
line=True, linestyle='-', linewidth=2, linealpha = 1,
fill=True, fillalpha = 1, df=None):
if df is None:
df = self._get_dos(mol, atoms=atoms, dos_type=dos_type,
per_energy=per_energy, eunits=eunits,
lbound=lbound, ubound=ubound)
energy = df.set_index('Freq').stack().values
freq = df.set_index('Freq').stack().index.droplevel(level=1).values
if not ax:
fig, ax = plt.subplots()
if line:
ax.plot(freq, energy, label=label, color=color,
alpha=linealpha, linestyle=linestyle, linewidth=linewidth)
#drawstyle='steps-mid')
else:
ax.plot([], [], label=label, color=color, linewidth=linewidth)
if fill:
ax.fill_betweenx(energy, freq, color=color, alpha=fillalpha)
return ax
def plot_dos(self, eunits='eV', per_energy=1., lbound=None, ubound=None,
color_homo='g', color_lumo='r',
homo_lumo_lines=True,homo_lumo_values=True,band_gap_value=True,
legend_size=10, ax=None):
"""plot Density of States and HOMO/LUMO gap
Parameters
----------
eunits : str
unit of energy
per_energy : float
energy interval to group states by
lbound : float
lower bound energy
ubound : float
upper bound energy
color_homo : matplotlib.colors
color of homo in matplotlib format
color_lumo : matplotlib.colors
color of lumo in matplotlib format
homo_lumo_lines : bool
draw lines at HOMO and LUMO energies
homo_lumo_values : bool
annotate HOMO and LUMO lines with exact energy values
band_gap_value : bool
annotate inbetween HOMO and LUMO lines with band gap value
legend_size : int
the font size (in pts) for the legend
ax : matplotlib.Axes
an existing axes to plot the data on
Returns
-------
plot : matplotlib.axes.Axes
plotted optimisation data
"""
if not ax:
fig, ax = plt.subplots()
ax.set_xlabel('Density of States (per {0} {1})'.format(per_energy, eunits))
ax.set_ylabel('Energy ({})'.format(eunits))
mol = self
self._plot_single_dos(mol, ax=ax, label='HOMO', dos_type='homo', color=color_homo,
line=False, fillalpha=0.3,
per_energy=per_energy, eunits=eunits, lbound=lbound, ubound=ubound)
self._plot_single_dos(mol, ax=ax, label='LUMO', dos_type='lumo', color=color_lumo,
fillalpha=0.3, line=False,
per_energy=per_energy, eunits=eunits, lbound=lbound, ubound=ubound)
homo, lumo = mol.get_orbital_energies(mol.get_orbital_homo_lumo())
xlower, xupper = ax.get_xbound()
if homo_lumo_lines:
ax.plot([xlower, xupper], [homo,homo], 'g-', linewidth=2)
ax.plot([xlower, xupper], [lumo,lumo], 'r-', linewidth=2)
if homo_lumo_values:
ax.annotate('{0}{1}'.format(homo.round(1), eunits), xy=(xupper, homo),
xytext=(-5, -10), ha='right', textcoords='offset points')
ax.annotate('{0}{1}'.format(lumo.round(1), eunits), xy=(xupper, lumo),
xytext=(-5, 5), ha='right', textcoords='offset points')
if band_gap_value:
gap = lumo-homo
ax.annotate('{0}{1}'.format(gap.round(1), eunits), xy=(xupper, homo+0.5*gap),
xytext=(-5, -4), ha='right', textcoords='offset points')
ax.set_ybound(lbound, ubound)
if legend_size:
ax.legend(framealpha=0.5, prop={'size':legend_size})
ax.grid(True)
return ax
def _img_to_plot(self, x, y, image, ax=None, zoom=1):
"""add image to matplotlib axes at (x,y) """
if ax is None:
ax = plt.gca()
im = OffsetImage(image, zoom=zoom)
artists = []
ab = AnnotationBbox(im, (x, y), xycoords='data', frameon=False)
artists.append(ax.add_artist(ab))
#ax.update_datalim(np.column_stack([x, y]))
ax.autoscale(tight=False)
return artists
# TODO get fixed atoms from scan file
def plot_pes_scans(self, fixed_atoms, eunits='kJmol-1',
img_pos='', rotation=[0., 0., 0.], zoom=1, order=1):
"""plot Potential Energy Scan
Parameters
----------
img_pos : <'','local_mins','local_maxs','global_min','global_max'>
position image(s) of molecule conformation(s) on plot
rotation : [float, float, float]
rotation of molecule image(s)
"""
scan_datas = self._pes_data
if len(fixed_atoms) == 4:
xlabel = 'Dihedral Angle'
func = self.calc_dihedral_angle
elif len(fixed_atoms)==3:
xlabel = 'Valence Angle'
func = self.calc_bond_angle
else:
raise Exception('not 3 or 4 fixed atoms')
angles = []
energies = []
mols = []
for scan in scan_datas:
for i in range(scan.read('nscans')):
mol = scan.read('molecule', scan=i)
mols.append(self._create_molecule(data=scan, scan_step=i))
angles.append(func(fixed_atoms, mol=mol))
energies.extend(scan.read('scanenergies'))
# remove duplicate angles and sort by angle
# so that the local max are found correctly
df = pd.DataFrame({'energy':convertor(np.array(energies), 'eV', eunits),
'angle':angles, 'mol':mols})
df['rounded'] = df.angle.round(2) #rounding errors?
df.drop_duplicates('rounded', inplace=True)
df.sort('angle', inplace=True)
angles = np.array(df.angle.tolist())
energies = np.array(df.energy.tolist())
fig, ax = plt.subplots()
ax.plot(angles, energies-energies.min())
ax.scatter(angles, energies-energies.min())
ax.set_ylabel('Relative Energy ({0})'.format(eunits))
ax.set_xlabel(xlabel)
ax.grid(True)
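# candidate conformations to draw on the plot: local extrema of the energy curve, or the global min/max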
feature_dict = {
'':[],
'local_maxs' : df.index[argrelextrema(energies, np.greater, mode='wrap', order=order)[0]],
'local_mins' : df.index[argrelextrema(energies, np.less, mode='wrap', order=order)[0]],
'global_min' : [df.energy.idxmin()],
'global_max' : [df.energy.idxmax()]}
for indx in feature_dict[img_pos]:
img = self._image_molecule(df.mol.loc[indx], rotation=rotation, represent='ball_stick')
img = self._color_to_transparent(img)
self._img_to_plot(df.angle.loc[indx], df.energy.loc[indx]-energies.min(), img, zoom=zoom, ax=ax)
return ax, df
| gpl-3.0 |
grcanosa/code-playground | scrum/wekan/wekanplot.py | 1 | 4859 | #!/usr/bin/python3
import sys
import json
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import FixedLocator
import datetime
import numpy as np
import optparse
def extract_string(text,tag):
textspli = text.split();
for t in textspli:
if tag in t:
textspli2 = t.split(":");
return textspli2[1];
def extract_strings(text,tag):
vstr = [];
textspli = text.split();
for t in textspli:
if tag in t:
textspli2 = t.split(":");
vstr.append(textspli2[1]);
return vstr;
def extract_number(text,tag):
return float(extract_string(text,tag));
def str2date(datestr):
#print(datestr,datestr[0:4],datestr[4:6],datestr[6:8])
return datetime.date(int(datestr[0:4]), int(datestr[4:6]), int(datestr[6:8]))
def get_sprint_dates(wej):
if "description" not in wej:
print("Field description must be set for the board!!!!")
print("Assumming 2 week sprint with current week as first");
no = datetime.datetime.now().date();
desc = "";
desc += "START:"+(no-datetime.timedelta(days=no.weekday())).strftime("%Y%m%d");
desc += " "
desc += "END:"+(no-datetime.timedelta(days=no.weekday())+datetime.timedelta(days=13)).strftime("%Y%m%d");
wej["description"]= desc
description = wej["description"]
start = extract_string(description, "START");
end = extract_string(description,"END");
festivos = extract_strings(description, "FESTIVO")
print("Sprint goes from "+start+" to "+end)
dates=[];
startd = str2date(start)
endd = str2date(end)
festd = [str2date(d) for d in festivos];
auxd = startd;
while auxd <= endd:
if auxd not in festd and auxd.weekday() < 5:
dates.append(auxd);
auxd = auxd + datetime.timedelta(days=1);
return dates;
def get_card_hours(a):
horas = 0;
if "HORAS:" in a["title"]:
horas = extract_number(a["title"],"HORAS");
if "description" in a and "HORAS:" in a["description"]:
horas = extract_number(a["description"],"HORAS");
if horas == 0:
print("CARD "+a["title"]+" HAS NOT HOURS SET!!!")
return horas;
def get_hours(wej):
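# total hours come from every card; completed hours from cards in the list whose title contains "DONE"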
total_h = 0;
fin_h = 0;
finListId = 0
for l in wej["lists"]:
if "DONE" in l["title"]:
finListId = l["_id"];
break;
for a in wej["cards"]:
h = get_card_hours(a);
total_h += h;
if a["listId"] == finListId:
fin_h += h;
return total_h,fin_h;
def plot_values(dates,totalh,finh,now,title="Sprint"):
today_in = -1;
for d in dates:
today_in +=1;
if d == now.date():
break;
#print(today_in);
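# ideal burndown drops linearly from the total to zero over the sprint days; the current line drops at the average rate of hours completed so far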
hinc = totalh/(len(dates));
ptotalh = [totalh - d*hinc for d in range(0,len(dates)+1)];
#print(len(dates),ptotalh)
hincfin = finh / today_in;
#print(hinc,hincfin)
pfinh = [totalh - d*hincfin for d in range(0,today_in+1)];
#print(hincfin,pfinh)
fig, ax = plt.subplots()
# plot the target and current burndown lines; the labels (with today's hours) appear in the legend
ax.plot(ptotalh,'k-',label="Target (%d h)"% int(ptotalh[today_in]))
ax.plot(pfinh,'b-',label="Current (%d h)"% int(pfinh[today_in]))
ax.set_xticks(range(0,len(dates)+1))
#ax.set_xticklabels(["0"]+[d.strftime("%d %b") for d in dates]) # set the ticklabels to the list of datetimes
ax.set_xticklabels([d.strftime("%a %d") for d in dates]+["END"]) # set the ticklabels to the list of datetimes
#plt.xticks(rotation=30) # rotate the xticklabels by 30 deg
plt.axvline(today_in,0,1,color="gray",dashes=[1,1])
if(pfinh[today_in] > ptotalh[today_in]):
plt.plot((today_in, today_in), (ptotalh[today_in], pfinh[today_in]), 'r-',linewidth=4,solid_capstyle="butt",label="Difference (%dh)" % round(pfinh[today_in]-ptotalh[today_in]));
else:
plt.plot((today_in, today_in), (pfinh[today_in], ptotalh[today_in]), 'g-',linewidth=4,solid_capstyle="butt",label="We are awesome!!");
ax.legend(loc=1, fontsize=10) # make a legend and place in bottom-right (loc=4)
plt.xlabel("Sprint Days");
plt.ylabel("Hours");
ax.set_ylim(0,max(pfinh)*1.1)
plt.title(title+" (Total %dh)"%totalh)
plt.show()
# plt.plot(ptotald,ptotalh,pfind,pfinh);
# #plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d'))
# plt.gca().xaxis.set_major_locator(FixedLocator())
# plt.show()
def main(wekan_file):
f = open(wekan_file)
wej = json.load(f);
f.close()
dates = get_sprint_dates(wej)
totalh,finh = get_hours(wej);
plot_values(dates,totalh,finh,datetime.datetime.now(),title=wej["title"])
def parse_arguments(argv):
parser = optparse.OptionParser();
parser.add_option("-f","--f",help="Location of wekan file",default=None);
options,args = parser.parse_args(argv);
if options.f is None:
print("Please provide wekan json file");
parser.print_help();
exit(1);
return options.f;
if __name__ == "__main__":
wekan_file = parse_arguments(sys.argv);
sys.exit(main(wekan_file))
| mit |
andyh616/mne-python | examples/visualization/plot_topo_channel_epochs_image.py | 22 | 1861 | """
============================================================
Visualize channel over epochs as images in sensor topography
============================================================
This will produce what is sometimes called event related
potential / field (ERP/ERF) images.
One sensor topography plot is produced with the evoked field images from
the selected channels.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
###############################################################################
# Show event related fields images
layout = mne.find_layout(epochs.info, 'meg') # use full layout
title = 'ERF images - MNE sample data'
mne.viz.plot_topo_image_epochs(epochs, layout, sigma=0.5, vmin=-200, vmax=200,
colorbar=True, title=title)
plt.show()
| bsd-3-clause |
mrawls/BF-simulator | BF_pythonTYC.py | 1 | 12707 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from astropy.io import fits
from astropy.time import Time
from PyAstronomy import pyasl
from scipy import ndimage
import pandas as pd
import gaussfitter as gf
import BF_functions as bff
'''
Program to extract radial velocities from a double-lined binary star spectrum.
Uses the Broadening Function technique.
Meredith Rawls
2014-2015
This version is especially for TYC 3559!
Based loosely on Rucinski's BFall_IDL.pro, and uses the PyAstronomy tools.
http://www.astro.utoronto.ca/~rucinski/BFdescription.html
http://www.hs.uni-hamburg.de/DE/Ins/Per/Czesla/PyA/PyA/pyaslDoc/aslDoc/svd.html
In practice, you will run this twice: once to do the initial BF, and then again
to properly fit the peaks of each BF with a Gaussian.
INPUT
infiles: single-column file with one FITS or TXT filename (w/ full path) per line
1st entry must be for the template star (e.g., arcturus or phoenix model)
(the same template is used to find RVs for both stars)
NO comments are allowed in this file
FUN FACT: unless APOGEE, these should be continuum-normalized to 1 !!!
bjdinfile: columns 0,1,2 must be filename, BJD, BCV (e.g., from IRAF bcvcorr)
top row must be for the template star (e.g., arcturus)
(the 0th column is never used, but typically looks like infiles_BF.txt)
one line per observation
comments are allowed in this file using #
gausspars: your best initial guesses for fitting gaussians to the BF peaks
the parameters are [amp1, offset1, width1, amp2, offset2, width2]
the top line is ignored (template), but must have six values
one line per observation
comments are allowed in this file using #
OUTPUT
outfile: a file that will be created with 7 columns: BJD midpoint, orbital phase,
Kepler BJD, RV1, RV1 error, RV2, RV2 error
bfoutfile: a file that contains all the BF function data (raw RV, BF, gaussian model)
'''
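# Illustrative input-file rows (the values below are made up, purely to show the expected layout):
#   bjdinfile row:  spec_0001.fits   2456789.123456   -12.345
#                   (filename, BJD midpoint, BCV from e.g. IRAF bcvcorr)
#   gausspars row:  0.9  -15.0  8.0   0.15  25.0  12.0
#                   (amp1, offset1, width1, amp2, offset2, width2 for the two BF peaks)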
# (for KIC 8848288, ie TYC 3559)
#infiles = 'infiles.txt'
#bjdinfile = 'bjdfile.txt'
#gausspars = 'gaussfit.txt'
#outfile = 'rvs_revisited3_BF.txt'
#bfoutfile = 'bfoutfile3.txt'
# same as above, but for APOGEE
infiles = 'infiles_apogee.txt'
bjdinfile = 'bjdfile_apogee.txt'
gausspars = 'gaussfit_apogee.txt'
outfile = 'rvs_apogee.txt'
bfoutfile = 'bfoutfile_apogee.txt'
isAPOGEE = True # toggle to use near-IR stuff, or not
SpecPlot = True # toggle to plot spectra before BFs, or not
bjdoffset = 2454833. # difference between real BJDs and 'bjdfunny' (truncated BJDs)
amplimits = [0.8,1, 0,0.2] # limits for gaussian normalized amplitude [min1,max1,min2,max2]
threshold = 10 # margin for gaussian position (raw RV in km/s)
widlimits = [0,15, 0,40] # limits for gaussian width (km/s) [min1,max1,min2,max2]
period = 5.56648; BJD0 = 2454904.8038 # 8848288 orbital parameters
rvstd = 0; bcvstd = 0 # model template RV is 0
smoothstd = 1.0 #1.5 # stdev of Gaussian to smooth BFs by (~slit width in pixels)
m = 171 # length of the BF (must be longer if RVs are far from 0)
#w00 = 4485; n = 53000; stepV = 1.5
#w00 = 4485; n = 80000; stepV = 1.5 # testing larger, redder wavelength range
w00 = 15145; n = 15000; stepV = 1.5 # APOGEE
rvneg = -74; rvpos = 34; ymin = -0.05; ymax = 1.05 # plot limits
##########
print('Welcome to the Broadening Function party!')
print('')
print('MAKE SURE THIS IS WHAT YOU WANT:')
print('You set Porb = {0} days, BJD0 = {1} days'.format(period, BJD0))
# CREATE NEW SPECTRUM IN LOG SPACE
# This uses w00, n, and stepV, defined above. The new wavelength grid is w1.
# The BF will be evenly spaced in velocity with length m.
# The velocity steps are r (km/s/pix).
w1, m, r = bff.logify_spec(isAPOGEE, w00, n, stepV, m)
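# Rough idea of a constant-velocity-step (log-wavelength) grid -- NOT necessarily how
# bff.logify_spec builds it, just an assumed sketch: each pixel is stepV km/s redder
# than the previous one, so the grid is uniform in log(wavelength):
#   w_sketch = w00 * (1.0 + stepV / 2.99792458e5) ** np.arange(n)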
# READ IN ALL THE THINGS
specdata = bff.read_specfiles(infiles, bjdinfile, isAPOGEE)
nspec = specdata[0]; filenamelist = specdata[1]
datetimelist = specdata[2]; wavelist = specdata[3]; speclist = specdata[4]
# INTERPOLATE THE TEMPLATE AND OBJECT SPECTRA ONTO THE NEW LOG-WAVELENGTH GRID
# OPTION TO PLOT THIS (commented out for now)
##plt.figure(1)
newspeclist = []
yoffset = 0
if SpecPlot == True:
plt.axis([w1[0], w1[-1], 0, nspec+3])
plt.xlabel(r'Wavelength ({\AA})')
for i in range (0, nspec):
newspec = np.interp(w1, wavelist[i], speclist[i])
newspeclist.append(newspec)
if SpecPlot == True:
plt.plot(w1, newspec+yoffset, label=datetimelist[i].iso[0:10])#, color='b')
yoffset = yoffset + 1
if SpecPlot == True:
##plt.legend()
plt.show()
# BROADENING FUNCTION TIME
svd = pyasl.SVD()
# Single Value Decomposition
svd.decompose(newspeclist[0], m)
singularvals = svd.getSingularValues()
bflist = []
bfsmoothlist = []
for i in range (0, nspec):
# Obtain the broadening function
bf = svd.getBroadeningFunction(newspeclist[i]) # this is a full matrix
bfarray = svd.getBroadeningFunction(newspeclist[i], asarray=True)
# Smooth the array-like broadening function
# 1ST LINE - python 2.7 with old version of pandas; 2ND LINE - python 3.5 with new version of pandas
#bfsmooth = pd.rolling_window(bfarray, window=5, win_type='gaussian', std=smoothstd, center=True)
bfsmooth = pd.Series(bfarray).rolling(window=5, win_type='gaussian', center=True).mean(std=smoothstd)
# The rolling window makes nans at the start because it's a punk.
for j in range(0,len(bfsmooth)):
if np.isnan(bfsmooth[j]) == True:
bfsmooth[j] = 0
else:
bfsmooth[j] = bfsmooth[j]
bflist.append(bf)
bfsmoothlist.append(bfsmooth)
bfnormlist = []
for a in bfsmoothlist:
bfnormlist.append((a-np.min(a))/(np.max(a)-np.min(a)))
# Obtain the indices in RV space that correspond to the BF
bf_ind = svd.getRVAxis(r, 1) + rvstd - bcvstd
# OPTION TO PLOT THE SINGULAR VALUES TO SEE WHERE THEY AREN'T A MESS
# this probably isn't important, because instead of choosing which values to throw out,
# we use "Route #2" as described by Rucinski and just use the final row of the BF array
# and smooth it with a Gaussian to get rid of noise problems.
# for more info, seriously, read http://www.astro.utoronto.ca/~rucinski/SVDcookbook.html
##plt.figure(2)
#plt.semilogy(singularvals, 'b-')
#plt.xlabel('BF Index')
#plt.ylabel('Singular Values')
#plt.show()
# OPTION TO PLOT THE SMOOTHED BFs
##plt.figure(3)
plt.axis([rvneg, rvpos, -0.2, float(nspec)/2.5])
plt.xlabel('Radial Velocity (km s$^{-1}$)')
plt.ylabel('Broadening Function (arbitrary amplitude)')
yoffset = 0.0
for i in range(1, nspec):
plt.plot(bf_ind, bfsmoothlist[i]+yoffset, color='b')
yoffset = yoffset + 0.4
plt.show()
# FIT THE SMOOTHED BF PEAKS WITH TWO GAUSSIANS
# you have to have pretty decent guesses in the gausspars file for this to work.
#bffitlist = bff.gaussparty(gausspars, nspec, filenamelist, bfsmoothlist, bf_ind, threshold)
bffitlist = bff.gaussparty(gausspars, nspec, filenamelist, bfnormlist, bf_ind, amplimits, threshold, widlimits)
rvraw1 = []; rvraw2 = []; rvraw1_err = []; rvraw2_err = []
rvraw1.append(0), rvraw2.append(0), rvraw1_err.append(0), rvraw2_err.append(0)
for i in range(1, len(bffitlist)):
rvraw1.append(bffitlist[i][0][1]) # [0,1,2] is amp,rv,width for star 1; [4,5,6] is same for star2
rvraw2.append(bffitlist[i][0][4])
rvraw1_err.append(bffitlist[i][2][1])
rvraw2_err.append(bffitlist[i][2][4])
# CALCULATE ORBITAL PHASES AND FINAL RV CURVE
rvdata = bff.rvphasecalc(bjdinfile, bjdoffset, nspec, period, BJD0, rvraw1, rvraw1_err, rvraw2, rvraw2_err, rvstd, bcvstd)
phase = rvdata[0]; bjdfunny = rvdata[1]
rv1 = rvdata[2]; rv2 = rvdata[3]
rv1_err = rvdata[4]; rv2_err = rvdata[5]
g2 = open(outfile, 'w')
print('# RVs calculated with BF_python.py', file=g2)
print('#', file=g2)
print('# Porb = {0} days, BJD0 = {1} days'.format(period, BJD0), file=g2)
print('# Wavelength axis = [{0} - {1}] Angstroms'.format(w1[0], w1[-1]), file=g2)
print('#', file=g2)
print('# Template spectrum (line 0 of infiles): {0}'.format(filenamelist[0]), file=g2)
print('# RV of template, BCV of template (km/s): {0}, {1}'.format(rvstd, bcvstd), file=g2)
print('#', file=g2)
print('# List of all input spectra (infiles): {0}'.format(infiles), file=g2)
print('# Target BJD and BCV info (bjdinfile): {0}'.format(bjdinfile), file=g2)
print('# Gaussian fit guesses (gausspars): {0}'.format(gausspars), file=g2)
print('#', file=g2)
print('# BF parameters: w00 = {0}; n = {1}; stepV = {2}'.format(w00, n, stepV), file=g2)
print('# BF parameters: smoothstd = {0}; m = {1}'.format(smoothstd, m), file=g2)
print('# gaussfit: amplimits = {0}; threshold = {1}, widlimits = {2}'.format(amplimits, threshold, widlimits), file=g2)
print('#', file=g2)
print('# time, phase, adjusted_time, RV1 [km/s], error1 [km/s], RV2 [km/s], error2 [km/s]', file=g2)
print('#', file=g2)
for i in range(1, nspec):
print ('%.9f %.9f %.9f %.5f %.5f %.5f %.5f' % (bjdfunny[i] + bjdoffset, phase[i], bjdfunny[i],
rv1[i], rv1_err[i], rv2[i], rv2_err[i]), file=g2)
g2.close()
print('BJD, phase, and RVs written to %s.' % outfile)
print('Use rvplotmaker.py to plot the RV curve.')
try:
bfout = open(bfoutfile, 'w')
for idx in range(1, nspec):
print('###', file=bfout)
print('# timestamp: {0}'.format(datetimelist[idx]), file=bfout)
        print('# Gaussian 1 [amp, RV +/- err, wid]: [{0:.2f}, {1:.2f} +/- {2:.2f}, {3:.2f}]'.format(bffitlist[idx][0][0], rvraw1[idx], rvraw1_err[idx], bffitlist[idx][0][2]), file=bfout)
        print('# Gaussian 2 [amp, RV +/- err, wid]: [{0:.2f}, {1:.2f} +/- {2:.2f}, {3:.2f}]'.format(bffitlist[idx][0][3], rvraw2[idx], rvraw2_err[idx], bffitlist[idx][0][5]), file=bfout)
print('# Uncorrected_RV, BF_amp, Gaussian_fit', file=bfout)
print('###', file=bfout)
for vel, amp, modamp in zip(bf_ind, bfsmoothlist[idx], bffitlist[idx][1]):
print(vel, amp, modamp, file=bfout)
bfout.close()
except:
print('No BF outfile specified, not saving BF data to file')
# handy little gaussian function maker
def gaussian(x, amp, mu, sig): # i.e., (xarray, amp, rv, width)
return amp * np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
# PLOT THE FINAL SMOOTHED BFS + GAUSSIAN FITS IN INDIVIDUAL PANELS
# manually adjust this multi-panel plot based on how many spectra you have
#plt.figure(4)
windowcols = 3 #4 # how many window columns there should be
#windowrows = 6
windowrows = int([np.rint((nspec-1)/windowcols) if (np.float(nspec-1)/windowcols)%windowcols == 0 else np.rint((nspec-1)/windowcols)+1][0])
xmin = rvneg
xmax = rvpos
#gaussxs = np.arange(-200, 200, 0.1)
fig = plt.figure(1, figsize=(15,10))
fig.text(0.5, 0.04, 'Uncorrected Radial Velocity (km s$^{-1}$)', ha='center', va='center', size=26)
fig.text(0.07, 0.5, 'Broadening Function', ha='center', va='center', size=26, rotation='vertical')
for i in range (1,nspec):
ax = fig.add_subplot(windowrows, windowcols,i) # out of range if windowcols x windowrows < nspec
ax.yaxis.set_major_locator(MultipleLocator(0.2))
if i!=1 and i!=5 and i!=9 and i!=13 and i!=17 and i!=21 and i!=25:
ax.set_yticklabels(())
#if i!=20 and i!=21 and i!=22 and i!=23 and i!=24 and i!=25:
if i < nspec-windowrows:
#if i!=13 and i!=14 and i!=15 and i!=16:
ax.set_xticklabels(())
plt.subplots_adjust(wspace=0, hspace=0)
plt.axis([xmin, xmax, ymin, ymax])
plt.tick_params(axis='both', which='major', labelsize=14)
plt.text(xmax - 0.25*(np.abs(xmax-xmin)), 0.8*ymax, '%.3f $\phi$' % (phase[i]), size=12)
plt.text(xmax - 0.35*(np.abs(xmax-xmin)), 0.6*ymax, '%s' % (datetimelist[i].iso[0:10]), size=12)
#plt.plot(bf_ind, bfsmoothlist[i], color='k', lw=1.5, ls='-', label='Smoothed BF')
plt.plot(bf_ind, bfnormlist[i], color='k', lw=1.5, ls='-', label='Normalized Smoothed BF')
plt.plot(bf_ind, bffitlist[i][1], color='b', lw=2, ls='--', label='Two Gaussian fit')
gauss1 = gaussian(bf_ind, bffitlist[i][0][0], bffitlist[i][0][1], bffitlist[i][0][2])
gauss2 = gaussian(bf_ind, bffitlist[i][0][3], bffitlist[i][0][4], bffitlist[i][0][5])
plt.plot(bf_ind, gauss1, color='#e34a33', lw=2, ls='--')#, label='Gaussian fit 1')
plt.plot(bf_ind, gauss2, color='#fdbb84', lw=2, ls='--')#, label='Gaussian fit 2')
# OPTION TO PLOT VERTICAL LINE AT ZERO
plt.axvline(x=0, color='0.75')
# print legend
if i==nspec-1: ax.legend(bbox_to_anchor=(2.6,0.7), loc=1, borderaxespad=0.,
frameon=False, handlelength=3, prop={'size':20})
plt.show() | mit |
smrjan/seldon-server | python/build/lib/seldon/sklearn_estimator.py | 3 | 2924 | from sklearn.feature_extraction import DictVectorizer
from seldon.pipeline.pandas_pipelines import BasePandasEstimator
from collections import OrderedDict
import io
from sklearn.utils import check_X_y
from sklearn.utils import check_array
from sklearn.base import BaseEstimator,ClassifierMixin
import pandas as pd
class SKLearnClassifier(BasePandasEstimator,BaseEstimator,ClassifierMixin):
"""
Wrapper for XGBoost classifier with pandas support
XGBoost specific arguments follow https://github.com/dmlc/xgboost/blob/master/python-package/xgboost/sklearn.py
clf : sklearn estimator
sklearn estimator to run
target : str
Target column
target_readable : str
More descriptive version of target variable
included : list str, optional
columns to include
excluded : list str, optional
columns to exclude
id_map : dict (int,str), optional
map of class ids to high level names
sk_args : str, optional
extra args for sklearn classifier
"""
def __init__(self, clf=None,target=None, target_readable=None,included=None,excluded=None,id_map={},vectorizer=None,**sk_args):
super(SKLearnClassifier, self).__init__(target,target_readable,included,excluded,id_map)
self.vectorizer = vectorizer
self.clf = clf
self.sk_args = sk_args
def fit(self,X,y=None):
"""
Fit an sklearn classifier to data
Parameters
----------
X : pandas dataframe or array-like
training samples
y : array like, required for array-like X and not used presently for pandas dataframe
class labels
Returns
-------
self: object
"""
if isinstance(X,pd.DataFrame):
df = X
(X,y,self.vectorizer) = self.convert_numpy(df)
else:
check_X_y(X,y)
self.clf.fit(X,y)
return self
def predict_proba(self,X):
"""
Returns class probability estimates for the given test data.
X : pandas dataframe or array-like
Test samples
Returns
-------
proba : array-like, shape = (n_samples, n_outputs)
Class probability estimates.
"""
if isinstance(X,pd.DataFrame):
df = X
(X,_,_) = self.convert_numpy(df)
else:
check_array(X)
return self.clf.predict_proba(X)
def predict(self,X):
"""
Returns class predictions
X : pandas dataframe or array-like
Test samples
Returns
-------
proba : array-like, shape = (n_samples, n_outputs)
Class predictions
"""
if isinstance(X,pd.DataFrame):
df = X
(X,_,_) = self.convert_numpy(df)
else:
check_array(X)
return self.clf.predict(X)
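# Hypothetical usage sketch ('df' and 'label' are made-up names; assumes a pandas
# DataFrame that contains the target column):
#   from sklearn.linear_model import LogisticRegression
#   est = SKLearnClassifier(clf=LogisticRegression(), target='label')
#   est.fit(df)                    # DataFrame is vectorized internally via convert_numpy
#   probs = est.predict_proba(df)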
| apache-2.0 |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/pandas/io/excel.py | 4 | 49188 | """
Module parse to/from Excel
"""
#----------------------------------------------------------------------
# ExcelFile class
import os
import datetime
import abc
import numpy as np
from pandas.io.parsers import TextParser
from pandas.io.common import _is_url, _urlopen
from pandas.tseries.period import Period
from pandas import json
from pandas.compat import map, zip, reduce, range, lrange, u, add_metaclass
from pandas.core import config
from pandas.core.common import pprint_thing
import pandas.compat as compat
import pandas.compat.openpyxl_compat as openpyxl_compat
import pandas.core.common as com
from warnings import warn
from distutils.version import LooseVersion
__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
_writer_extensions = ["xlsx", "xls", "xlsm"]
_writers = {}
def register_writer(klass):
"""Adds engine to the excel writer registry. You must use this method to
integrate with ``to_excel``. Also adds config options for any new
``supported_extensions`` defined on the writer."""
if not compat.callable(klass):
raise ValueError("Can only register callables as engines")
engine_name = klass.engine
_writers[engine_name] = klass
for ext in klass.supported_extensions:
if ext.startswith('.'):
ext = ext[1:]
if ext not in _writer_extensions:
config.register_option("io.excel.%s.writer" % ext,
engine_name, validator=str)
_writer_extensions.append(ext)
def get_writer(engine_name):
if engine_name == 'openpyxl':
try:
import openpyxl
# with version-less openpyxl engine
# make sure we make the intelligent choice for the user
if LooseVersion(openpyxl.__version__) < '2.0.0':
return _writers['openpyxl1']
else:
return _writers['openpyxl2']
except ImportError:
# fall through to normal exception handling below
pass
try:
return _writers[engine_name]
except KeyError:
raise ValueError("No Excel writer '%s'" % engine_name)
def read_excel(io, sheetname=0, **kwds):
"""Read an Excel table into a pandas DataFrame
Parameters
----------
io : string, file-like object, or xlrd workbook.
The string could be a URL. Valid URL schemes include http, ftp, s3,
and file. For file URLs, a host is expected. For instance, a local
file could be file://localhost/path/to/workbook.xlsx
sheetname : string, int, mixed list of strings/ints, or None, default 0
Strings are used for sheet names, Integers are used in zero-indexed sheet
positions.
Lists of strings/integers are used to request multiple sheets.
Specify None to get all sheets.
str|int -> DataFrame is returned.
list|None -> Dict of DataFrames is returned, with keys representing sheets.
Available Cases
* Defaults to 0 -> 1st sheet as a DataFrame
* 1 -> 2nd sheet as a DataFrame
* "Sheet1" -> 1st sheet as a DataFrame
* [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames
* None -> All sheets as a dictionary of DataFrames
header : int, default 0
Row to use for the column labels of the parsed DataFrame
skiprows : list-like
Rows to skip at the beginning (0-indexed)
skip_footer : int, default 0
Rows at the end to skip (0-indexed)
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
index_col : int, default None
Column to use as the row labels of the DataFrame. Pass None if
there is no such column
parse_cols : int or list, default None
* If None then parse all columns,
* If int then indicates last column to be parsed
* If list of ints then indicates list of column numbers to be parsed
* If string then indicates comma separated list of column names and
column ranges (e.g. "A:E" or "A,C,E:F")
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally
has_index_names : boolean, default False
True if the cols defined in index_col have an index name and are
not in the header. Index name will be placed on a separate line below
the header.
Returns
-------
parsed : DataFrame or Dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheetname argument
for more information on when a Dict of Dataframes is returned.
"""
if 'kind' in kwds:
kwds.pop('kind')
warn("kind keyword is no longer supported in read_excel and may be "
"removed in a future version", FutureWarning)
engine = kwds.pop('engine', None)
return ExcelFile(io, engine=engine).parse(sheetname=sheetname, **kwds)
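# Minimal usage sketch (file name is hypothetical):
#   df = read_excel('workbook.xlsx')                      # first sheet -> DataFrame
#   sheets = read_excel('workbook.xlsx', sheetname=None)  # all sheets -> dict of DataFrames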
class ExcelFile(object):
"""
Class for parsing tabular excel sheets into DataFrame objects.
Uses xlrd. See ExcelFile.parse for more documentation
Parameters
----------
io : string, file-like object or xlrd workbook
If a string, expected to be a path to xls or xlsx file
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
"""
def __init__(self, io, **kwds):
import xlrd # throw an ImportError if we need to
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9): # pragma: no cover
raise ImportError("pandas requires xlrd >= 0.9.0 for excel "
"support, current version " + xlrd.__VERSION__)
self.io = io
engine = kwds.pop('engine', None)
if engine is not None and engine != 'xlrd':
raise ValueError("Unknown engine: %s" % engine)
if isinstance(io, compat.string_types):
if _is_url(io):
data = _urlopen(io).read()
self.book = xlrd.open_workbook(file_contents=data)
else:
self.book = xlrd.open_workbook(io)
elif engine == 'xlrd' and isinstance(io, xlrd.Book):
self.book = io
elif not isinstance(io, xlrd.Book) and hasattr(io, "read"):
# N.B. xlrd.Book has a read attribute too
data = io.read()
self.book = xlrd.open_workbook(file_contents=data)
else:
raise ValueError('Must explicitly set engine if not passing in'
' buffer or path for io.')
def parse(self, sheetname=0, header=0, skiprows=None, skip_footer=0,
index_col=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None, chunksize=None,
convert_float=True, has_index_names=False, converters=None, **kwds):
"""Read an Excel table into DataFrame
Parameters
----------
sheetname : string, int, mixed list of strings/ints, or None, default 0
Strings are used for sheet names, Integers are used in zero-indexed sheet
positions.
Lists of strings/integers are used to request multiple sheets.
Specify None to get all sheets.
str|int -> DataFrame is returned.
list|None -> Dict of DataFrames is returned, with keys representing sheets.
Available Cases
* Defaults to 0 -> 1st sheet as a DataFrame
* 1 -> 2nd sheet as a DataFrame
* "Sheet1" -> 1st sheet as a DataFrame
* [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames
* None -> All sheets as a dictionary of DataFrames
header : int, default 0
Row to use for the column labels of the parsed DataFrame
skiprows : list-like
Rows to skip at the beginning (0-indexed)
skip_footer : int, default 0
Rows at the end to skip (0-indexed)
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels
index_col : int, default None
Column to use as the row labels of the DataFrame. Pass None if
there is no such column
parse_cols : int or list, default None
* If None then parse all columns
* If int then indicates last column to be parsed
* If list of ints then indicates list of column numbers to be
parsed
* If string then indicates comma separated list of column names and
column ranges (e.g. "A:E" or "A,C,E:F")
parse_dates : boolean, default False
Parse date Excel values,
date_parser : function default None
Date parsing function
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
thousands : str, default None
Thousands separator
chunksize : int, default None
Size of file chunk to read for lazy evaluation.
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, all
numeric data will be read in as floats: Excel stores all numbers as
floats internally.
has_index_names : boolean, default False
True if the cols defined in index_col have an index name and are
not in the header
verbose : boolean, default False
Set to True to print a single statement when reading each
excel sheet.
Returns
-------
parsed : DataFrame or Dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheetname argument
for more information on when a Dict of Dataframes is returned.
"""
skipfooter = kwds.pop('skipfooter', None)
if skipfooter is not None:
skip_footer = skipfooter
return self._parse_excel(sheetname=sheetname, header=header,
skiprows=skiprows,
index_col=index_col,
has_index_names=has_index_names,
parse_cols=parse_cols,
parse_dates=parse_dates,
date_parser=date_parser, na_values=na_values,
thousands=thousands, chunksize=chunksize,
skip_footer=skip_footer,
convert_float=convert_float,
converters=converters,
**kwds)
def _should_parse(self, i, parse_cols):
def _range2cols(areas):
"""
Convert comma separated list of column names and column ranges to a
list of 0-based column indexes.
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
def _excel2num(x):
"Convert Excel column name like 'AB' to 0-based column index"
return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1,
x.upper().strip(), 0) - 1
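            # e.g. _excel2num('A') -> 0, _excel2num('AB') -> 27 (zero-based)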
cols = []
for rng in areas.split(','):
if ':' in rng:
rng = rng.split(':')
cols += lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1)
else:
cols.append(_excel2num(rng))
return cols
if isinstance(parse_cols, int):
return i <= parse_cols
elif isinstance(parse_cols, compat.string_types):
return i in _range2cols(parse_cols)
else:
return i in parse_cols
def _parse_excel(self, sheetname=0, header=0, skiprows=None, skip_footer=0,
index_col=None, has_index_names=None, parse_cols=None,
parse_dates=False, date_parser=None, na_values=None,
thousands=None, chunksize=None, convert_float=True,
verbose=False, **kwds):
import xlrd
from xlrd import (xldate, XL_CELL_DATE,
XL_CELL_ERROR, XL_CELL_BOOLEAN,
XL_CELL_NUMBER)
epoch1904 = self.book.datemode
def _parse_cell(cell_contents,cell_typ):
"""converts the contents of the cell into a pandas
appropriate object"""
if cell_typ == XL_CELL_DATE:
if xlrd_0_9_3:
# Use the newer xlrd datetime handling.
cell_contents = xldate.xldate_as_datetime(cell_contents,
epoch1904)
# Excel doesn't distinguish between dates and time,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if ((not epoch1904 and year == (1899, 12, 31))
or (epoch1904 and year == (1904, 1, 1))):
cell_contents = datetime.time(cell_contents.hour,
cell_contents.minute,
cell_contents.second,
cell_contents.microsecond)
else:
# Use the xlrd <= 0.9.2 date handling.
dt = xldate.xldate_as_tuple(cell_contents, epoch1904)
if dt[0] < datetime.MINYEAR:
cell_contents = datetime.time(*dt[3:])
else:
cell_contents = datetime.datetime(*dt)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif convert_float and cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
                # it's a minimal perf hit and less surprising
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
# xlrd >= 0.9.3 can return datetime objects directly.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
xlrd_0_9_3 = True
else:
xlrd_0_9_3 = False
ret_dict = False
#Keep sheetname to maintain backwards compatibility.
if isinstance(sheetname, list):
sheets = sheetname
ret_dict = True
elif sheetname is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheetname]
#handle same-type duplicates.
sheets = list(set(sheets))
output = {}
for asheetname in sheets:
if verbose:
print("Reading sheet %s" % asheetname)
if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.book.sheet_by_index(asheetname)
data = []
should_parse = {}
for i in range(sheet.nrows):
row = []
for j, (value, typ) in enumerate(zip(sheet.row_values(i),
sheet.row_types(i))):
if parse_cols is not None and j not in should_parse:
should_parse[j] = self._should_parse(j, parse_cols)
if parse_cols is None or should_parse[j]:
row.append(_parse_cell(value,typ))
data.append(row)
if header is not None:
data[header] = _trim_excel_header(data[header])
parser = TextParser(data, header=header, index_col=index_col,
has_index_names=has_index_names,
na_values=na_values,
thousands=thousands,
parse_dates=parse_dates,
date_parser=date_parser,
skiprows=skiprows,
skip_footer=skip_footer,
chunksize=chunksize,
**kwds)
output[asheetname] = parser.read()
if ret_dict:
return output
else:
return output[asheetname]
@property
def sheet_names(self):
return self.book.sheet_names()
def close(self):
"""close io if necessary"""
if hasattr(self.io, 'close'):
self.io.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def _trim_excel_header(row):
# trim header row so auto-index inference works
# xlrd uses '' , openpyxl None
while len(row) > 0 and (row[0] == '' or row[0] is None):
row = row[1:]
return row
def _conv_value(val):
# Convert numpy types to Python types for the Excel writers.
if com.is_integer(val):
val = int(val)
elif com.is_float(val):
val = float(val)
elif com.is_bool(val):
val = bool(val)
elif isinstance(val, Period):
val = "%s" % val
return val
@add_metaclass(abc.ABCMeta)
class ExcelWriter(object):
"""
Class for writing DataFrame objects into excel sheets, default is to use
xlwt for xls, openpyxl for xlsx. See DataFrame.to_excel for typical usage.
Parameters
----------
path : string
Path to xls or xlsx file.
engine : string (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
date_format : string, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD')
datetime_format : string, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
# - Mandatory
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
# --> called to write additional DataFrames to disk
# - ``supported_extensions`` (tuple of supported extensions), used to
# check that engine supports the given extension.
# - ``engine`` - string that gives the engine name. Necessary to
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
# lookup.
# - ``save(self)`` --> called to save file to disk
# - Mostly mandatory (i.e. should at least exist)
# - book, cur_sheet, path
# - Optional:
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
# with path as first argument.
# You also need to register the class with ``register_writer()``.
# Technically, ExcelWriter implementations don't need to subclass
# ExcelWriter.
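    # A bare-bones illustration of the checklist above (the engine name and the
    # '.myx' extension are made up; method bodies are elided with ``...``):
    #
    #     class _MyWriter(ExcelWriter):
    #         engine = 'mywriter'
    #         supported_extensions = ('.myx',)
    #
    #         def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
    #             ...  # write each formatted cell into self.book
    #
    #         def save(self):
    #             ...  # persist self.book to self.path
    #
    #     register_writer(_MyWriter)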
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
if cls == ExcelWriter:
if engine is None:
ext = os.path.splitext(path)[-1][1:]
try:
engine = config.get_option('io.excel.%s.writer' % ext)
except KeyError:
error = ValueError("No engine for filetype: '%s'" % ext)
raise error
cls = get_writer(engine)
return object.__new__(cls)
# declare external properties you can count on
book = None
curr_sheet = None
path = None
@abc.abstractproperty
def supported_extensions(self):
"extensions that writer engine supports"
pass
@abc.abstractproperty
def engine(self):
"name of engine"
pass
@abc.abstractmethod
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
"""
        Write given formatted cells into an Excel sheet
Parameters
----------
cells : generator
            cell of formatted data to save to Excel sheet
sheet_name : string, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow: upper left cell row to dump data frame
startcol: upper left cell column to dump data frame
"""
pass
@abc.abstractmethod
def save(self):
"""
Save workbook to disk.
"""
pass
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, **engine_kwargs):
# validate that this engine can handle the extension
ext = os.path.splitext(path)[-1]
self.check_extension(ext)
self.path = path
self.sheets = {}
self.cur_sheet = None
if date_format is None:
self.date_format = 'YYYY-MM-DD'
else:
self.date_format = date_format
if datetime_format is None:
self.datetime_format = 'YYYY-MM-DD HH:MM:SS'
else:
self.datetime_format = datetime_format
def _get_sheet_name(self, sheet_name):
if sheet_name is None:
sheet_name = self.cur_sheet
if sheet_name is None: # pragma: no cover
raise ValueError('Must pass explicit sheet_name or set '
'cur_sheet property')
return sheet_name
@classmethod
def check_extension(cls, ext):
"""checks that path's extension against the Writer's supported
extensions. If it isn't supported, raises UnsupportedFiletypeError."""
if ext.startswith('.'):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
msg = (u("Invalid extension for engine '%s': '%s'") %
(pprint_thing(cls.engine), pprint_thing(ext)))
raise ValueError(msg)
else:
return True
# Allow use as a contextmanager
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""synonym for save, to make it more file-like"""
return self.save()
class _Openpyxl1Writer(ExcelWriter):
engine = 'openpyxl1'
supported_extensions = ('.xlsx', '.xlsm')
openpyxl_majorver = 1
def __init__(self, path, engine=None, **engine_kwargs):
if not openpyxl_compat.is_compat(major_ver=self.openpyxl_majorver):
raise ValueError('Installed openpyxl is not supported at this '
'time. Use {0}.x.y.'
.format(self.openpyxl_majorver))
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
super(_Openpyxl1Writer, self).__init__(path, **engine_kwargs)
# Create workbook object with default optimized_write=True.
self.book = Workbook()
# Openpyxl 1.6.1 adds a dummy sheet. We remove it.
if self.book.worksheets:
self.book.remove_sheet(self.book.worksheets[0])
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using openpyxl.
from openpyxl.cell import get_column_letter
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
xcell.value = _conv_value(cell.val)
style = None
if cell.style:
style = self._convert_to_style(cell.style)
for field in style.__fields__:
xcell.style.__setattr__(field,
style.__getattribute__(field))
if isinstance(cell.val, datetime.datetime):
xcell.style.number_format.format_code = self.datetime_format
elif isinstance(cell.val, datetime.date):
xcell.style.number_format.format_code = self.date_format
if cell.mergestart is not None and cell.mergeend is not None:
cletterstart = get_column_letter(startcol + cell.col + 1)
cletterend = get_column_letter(startcol + cell.mergeend + 1)
wks.merge_cells('%s%s:%s%s' % (cletterstart,
startrow + cell.row + 1,
cletterend,
startrow + cell.mergestart + 1))
# Excel requires that the format of the first cell in a merged
# range is repeated in the rest of the merged range.
if style:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
colletter = get_column_letter(col)
xcell = wks.cell("%s%s" % (colletter, row))
for field in style.__fields__:
xcell.style.__setattr__(
field, style.__getattribute__(field))
@classmethod
def _convert_to_style(cls, style_dict):
"""
converts a style_dict to an openpyxl style object
Parameters
----------
style_dict: style dictionary to convert
"""
from openpyxl.style import Style
xls_style = Style()
for key, value in style_dict.items():
for nk, nv in value.items():
if key == "borders":
(xls_style.borders.__getattribute__(nk)
.__setattr__('border_style', nv))
else:
xls_style.__getattribute__(key).__setattr__(nk, nv)
return xls_style
register_writer(_Openpyxl1Writer)
class _OpenpyxlWriter(_Openpyxl1Writer):
engine = 'openpyxl'
register_writer(_OpenpyxlWriter)
class _Openpyxl2Writer(_Openpyxl1Writer):
"""
Note: Support for OpenPyxl v2 is currently EXPERIMENTAL (GH7565).
"""
engine = 'openpyxl2'
openpyxl_majorver = 2
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using openpyxl.
from openpyxl.cell import get_column_letter
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
xcell.value = _conv_value(cell.val)
style_kwargs = {}
# Apply format codes before cell.style to allow override
if isinstance(cell.val, datetime.datetime):
style_kwargs.update(self._convert_to_style_kwargs({
'number_format':{'format_code': self.datetime_format}}))
elif isinstance(cell.val, datetime.date):
style_kwargs.update(self._convert_to_style_kwargs({
'number_format':{'format_code': self.date_format}}))
if cell.style:
style_kwargs.update(self._convert_to_style_kwargs(cell.style))
if style_kwargs:
xcell.style = xcell.style.copy(**style_kwargs)
if cell.mergestart is not None and cell.mergeend is not None:
cletterstart = get_column_letter(startcol + cell.col + 1)
cletterend = get_column_letter(startcol + cell.mergeend + 1)
wks.merge_cells('%s%s:%s%s' % (cletterstart,
startrow + cell.row + 1,
cletterend,
startrow + cell.mergestart + 1))
# Excel requires that the format of the first cell in a merged
# range is repeated in the rest of the merged range.
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
colletter = get_column_letter(col)
xcell = wks.cell("%s%s" % (colletter, row))
xcell.style = xcell.style.copy(**style_kwargs)
@classmethod
def _convert_to_style_kwargs(cls, style_dict):
"""
Convert a style_dict to a set of kwargs suitable for initializing
or updating-on-copy an openpyxl v2 style object
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_format'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replaced with a native openpyxl style object of the
appropriate class.
"""
_style_key_map = {
'borders': 'border',
}
style_kwargs = {}
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
_conv_to_x = getattr(cls, '_convert_to_{0}'.format(k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
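    # Illustrative normalization (hypothetical values): a style dict such as
    #   {'font': {'bold': True}, 'borders': {'bottom': 'thin'}}
    # becomes
    #   {'font': Font(bold=True), 'border': Border(bottom=Side(style='thin'))}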
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {
'sz': 'size',
'b': 'bold',
'i': 'italic',
'u': 'underline',
'strike': 'strikethrough',
'vertalign': 'vertAlign',
}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_map:
k = _font_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for consumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
return map(cls._convert_to_color, stop_seq)
@classmethod
def _convert_to_fill(cls, fill_dict):
"""
Convert ``fill_dict`` to an openpyxl v2 Fill object
Parameters
----------
fill_dict : dict
A dict with one or more of the following keys (or their synonyms),
'fill_type' ('patternType', 'patterntype')
'start_color' ('fgColor', 'fgcolor')
'end_color' ('bgColor', 'bgcolor')
or one or more of the following keys (or their synonyms).
'type' ('fill_type')
'degree'
'left'
'right'
'top'
'bottom'
'stop'
Returns
-------
fill : openpyxl.styles.Fill
"""
from openpyxl.styles import PatternFill, GradientFill
_pattern_fill_key_map = {
'patternType': 'fill_type',
'patterntype': 'fill_type',
'fgColor': 'start_color',
'fgcolor': 'start_color',
'bgColor': 'end_color',
'bgcolor': 'end_color',
}
_gradient_fill_key_map = {
'fill_type': 'type',
}
pfill_kwargs = {}
gfill_kwargs = {}
for k, v in fill_dict.items():
pk = gk = None
if k in _pattern_fill_key_map:
pk = _pattern_fill_key_map[k]
if k in _gradient_fill_key_map:
gk = _gradient_fill_key_map[k]
if pk in ['start_color', 'end_color']:
v = cls._convert_to_color(v)
if gk == 'stop':
v = cls._convert_to_stop(v)
if pk:
pfill_kwargs[pk] = v
elif gk:
gfill_kwargs[gk] = v
else:
pfill_kwargs[k] = v
gfill_kwargs[k] = v
try:
return PatternFill(**pfill_kwargs)
except TypeError:
return GradientFill(**gfill_kwargs)
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {
'border_style': 'style',
}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
@classmethod
def _convert_to_border(cls, border_dict):
"""
Convert ``border_dict`` to an openpyxl v2 Border object
Parameters
----------
border_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'left'
'right'
'top'
'bottom'
'diagonal'
'diagonal_direction'
'vertical'
'horizontal'
'diagonalUp' ('diagonalup')
'diagonalDown' ('diagonaldown')
'outline'
Returns
-------
border : openpyxl.styles.Border
"""
from openpyxl.styles import Border
_border_key_map = {
'diagonalup': 'diagonalUp',
'diagonaldown': 'diagonalDown',
}
border_kwargs = {}
for k, v in border_dict.items():
if k in _border_key_map:
k = _border_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
if k in ['left', 'right', 'top', 'bottom', 'diagonal']:
v = cls._convert_to_side(v)
border_kwargs[k] = v
return Border(**border_kwargs)
@classmethod
def _convert_to_alignment(cls, alignment_dict):
"""
Convert ``alignment_dict`` to an openpyxl v2 Alignment object
Parameters
----------
alignment_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'horizontal'
'vertical'
'text_rotation'
'wrap_text'
'shrink_to_fit'
'indent'
Returns
-------
alignment : openpyxl.styles.Alignment
"""
from openpyxl.styles import Alignment
return Alignment(**alignment_dict)
@classmethod
def _convert_to_number_format(cls, number_format_dict):
"""
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
initializer.
Parameters
----------
number_format_dict : dict
A dict with zero or more of the following keys.
'format_code' : str
Returns
-------
number_format : str
"""
try:
# >= 2.0.0 < 2.1.0
from openpyxl.styles import NumberFormat
return NumberFormat(**number_format_dict)
except:
# >= 2.1.0
return number_format_dict['format_code']
@classmethod
def _convert_to_protection(cls, protection_dict):
"""
Convert ``protection_dict`` to an openpyxl v2 Protection object.
Parameters
----------
protection_dict : dict
A dict with zero or more of the following keys.
'locked'
'hidden'
Returns
-------
"""
from openpyxl.styles import Protection
return Protection(**protection_dict)
register_writer(_Openpyxl2Writer)
class _XlwtWriter(ExcelWriter):
engine = 'xlwt'
supported_extensions = ('.xls',)
def __init__(self, path, engine=None, encoding=None, **engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
super(_XlwtWriter, self).__init__(path, **engine_kwargs)
if encoding is None:
encoding = 'ascii'
self.book = xlwt.Workbook(encoding=encoding)
self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format)
self.fm_date = xlwt.easyxf(num_format_str=self.date_format)
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using xlwt.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_sheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {}
for cell in cells:
val = _conv_value(cell.val)
num_format_str = None
if isinstance(cell.val, datetime.datetime):
num_format_str = self.datetime_format
elif isinstance(cell.val, datetime.date):
num_format_str = self.date_format
stylekey = json.dumps(cell.style)
if num_format_str:
stylekey += num_format_str
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, num_format_str)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.write_merge(startrow + cell.row,
startrow + cell.mergestart,
startcol + cell.col,
startcol + cell.mergeend,
val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
val, style)
@classmethod
def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',',
line_sep=';'):
"""helper which recursively generate an xlwt easy style string
for example:
hstyle = {"font": {"bold": True},
"border": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"align": {"horiz": "center"}}
will be converted to
font: bold on; \
border: top thin, right thin, bottom thin, left thin; \
align: horiz center;
"""
if hasattr(item, 'items'):
if firstlevel:
it = ["%s: %s" % (key, cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "%s " % (line_sep).join(it)
return out
else:
it = ["%s %s" % (key, cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "%s " % (field_sep).join(it)
return out
else:
item = "%s" % item
item = item.replace("True", "on")
item = item.replace("False", "off")
return item
@classmethod
def _convert_to_style(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlwt style object
Parameters
----------
style_dict: style dictionary to convert
num_format_str: optional number format string
"""
import xlwt
if style_dict:
xlwt_stylestr = cls._style_to_xlwt(style_dict)
style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
else:
style = xlwt.XFStyle()
if num_format_str is not None:
style.num_format_str = num_format_str
return style
register_writer(_XlwtWriter)
class _XlsxWriter(ExcelWriter):
engine = 'xlsxwriter'
supported_extensions = ('.xlsx',)
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, **engine_kwargs):
# Use the xlsxwriter module as the Excel writer.
import xlsxwriter
super(_XlsxWriter, self).__init__(path, engine=engine,
date_format=date_format,
datetime_format=datetime_format,
**engine_kwargs)
self.book = xlsxwriter.Workbook(path, **engine_kwargs)
def save(self):
"""
Save workbook to disk.
"""
return self.book.close()
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using xlsxwriter.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_worksheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {}
for cell in cells:
num_format_str = None
if isinstance(cell.val, datetime.datetime):
num_format_str = self.datetime_format
elif isinstance(cell.val, datetime.date):
num_format_str = self.date_format
stylekey = json.dumps(cell.style)
if num_format_str:
stylekey += num_format_str
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, num_format_str)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_range(startrow + cell.row,
startcol + cell.col,
startrow + cell.mergestart,
startcol + cell.mergeend,
cell.val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
cell.val, style)
def _convert_to_style(self, style_dict, num_format_str=None):
"""
converts a style_dict to an xlsxwriter format object
Parameters
----------
style_dict: style dictionary to convert
num_format_str: optional number format string
"""
# If there is no formatting we don't create a format object.
if num_format_str is None and style_dict is None:
return None
# Create a XlsxWriter format object.
xl_format = self.book.add_format()
if num_format_str is not None:
xl_format.set_num_format(num_format_str)
if style_dict is None:
return xl_format
# Map the cell font to XlsxWriter font properties.
if style_dict.get('font'):
font = style_dict['font']
if font.get('bold'):
xl_format.set_bold()
# Map the alignment to XlsxWriter alignment properties.
alignment = style_dict.get('alignment')
if alignment:
if (alignment.get('horizontal')
and alignment['horizontal'] == 'center'):
xl_format.set_align('center')
if (alignment.get('vertical')
and alignment['vertical'] == 'top'):
xl_format.set_align('top')
# Map the cell borders to XlsxWriter border properties.
if style_dict.get('borders'):
xl_format.set_border()
return xl_format
register_writer(_XlsxWriter)
| gpl-2.0 |
kobauman/education_tools | mastery_grids/experiment/splitAndCheck.py | 1 | 3481 | import sys
import pandas as pd
import numpy as np
from scipy import stats
def significant(array1,array2):
try:
arr1 = np.array(array1)
arr2 = np.array(array2)
print(stats.ttest_ind(arr1,arr2)[1])
return stats.ttest_ind(arr1,arr2)[1] < 0.1
except:
print('PROBLEM!')
return None
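# Usage sketch (array contents are hypothetical): significant(scores_group1, scores_group2)
# prints the two-sided t-test p-value and returns True when p < 0.1, i.e. when the
# two groups differ at the 10% significance level.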
def splitStudents(courseID, logins):
df = pd.DataFrame.from_csv('../data/course2students/'+str(courseID)+'_students.csv',index_col=None)
eqFlag = False
iteration = 1
while eqFlag == False:
print(iteration)
iteration+=1
df['expGroup'] = np.random.choice([1,2],len(df))
print(list(df['expGroup']))
eqFlag = True
#check significance of the difference
allPrior1 = df[df['expGroup']==1]['allPrior']
allPrior1 = list(allPrior1[pd.notnull(allPrior1)])
allPrior2 = df[df['expGroup']==2]['allPrior']
allPrior2 = list(allPrior2[pd.notnull(allPrior2)])
if significant(allPrior1,allPrior2):
eqFlag = False
print(np.average(allPrior1),np.average(allPrior2),significant(allPrior1,allPrior2))
lastPrior1 = df[df['expGroup']==1]['lastPrior']
lastPrior1 = list(lastPrior1[pd.notnull(lastPrior1)])
lastPrior2 = df[df['expGroup']==2]['lastPrior']
lastPrior2 = list(lastPrior2[pd.notnull(lastPrior2)])
if significant(lastPrior1,lastPrior2):
eqFlag = False
print(np.average(lastPrior1),np.average(lastPrior2),significant(lastPrior1,lastPrior2))
CSPrior1 = df[df['expGroup']==1]['CSPrior']
CSPrior1 = list(CSPrior1[pd.notnull(CSPrior1)])
CSPrior2 = df[df['expGroup']==2]['CSPrior']
CSPrior2 = list(CSPrior2[pd.notnull(CSPrior2)])
if significant(CSPrior1,CSPrior2):
eqFlag = False
print(np.average(CSPrior1),np.average(CSPrior2),significant(CSPrior1,CSPrior2))
lastCSPrior1 = df[df['expGroup']==1]['lastCSPrior']
lastCSPrior1 = list(lastCSPrior1[pd.notnull(lastCSPrior1)])
lastCSPrior2 = df[df['expGroup']==2]['lastCSPrior']
lastCSPrior2 = list(lastCSPrior2[pd.notnull(lastCSPrior2)])
if significant(lastCSPrior1,lastCSPrior2):
eqFlag = False
print(np.average(lastCSPrior1),np.average(lastCSPrior2),significant(lastCSPrior1,lastCSPrior2))
print(df['expGroup'].value_counts())
#assign logins
logins1 = pd.DataFrame.from_csv('../data/logins/'+logins+'_1.csv',index_col=None)
logins2 = pd.DataFrame.from_csv('../data/logins/'+logins+'_2.csv',index_col=None)
df.loc[df['expGroup']==1,'login'] = list(logins1['login'])[:len(df.loc[df['expGroup']==1])]
df.loc[df['expGroup']==2,'login'] = list(logins2['login'])[:len(df.loc[df['expGroup']==2])]
df.loc[df['expGroup']==1,'password'] = list(logins1['pswd'])[:len(df.loc[df['expGroup']==1])]
df.loc[df['expGroup']==2,'password'] = list(logins2['pswd'])[:len(df.loc[df['expGroup']==2])]
df.to_csv('../data/panel_data/'+str(courseID)+'_panel.csv',index=False)
#run function
if __name__ == "__main__":
'''
Randomly split the students, checking the equivalence of the groups
'''
if len(sys.argv) > 1:
splitStudents(int(sys.argv[1]),sys.argv[2])
else:
print("Appropriate format: python splitAndCheck.py 826 logins")
| apache-2.0 |
quheng/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
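    # Piecewise form implemented below: with z = y_true * y_pred, the loss is
    # -4*z for z < -1 and max(0, 1 - z)**2 for z >= -1 (hence exactly 0 once z >= 1).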
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
andaag/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
jjtoharia/KaggleOutbrain | pySpark/temp_spark.py | 1 | 32616 | # -*- coding: utf-8 -*-
"""
Spyder editor
This is a temporary file
"""
#
# In [pyspark | GoogleCloud] this is NOT needed (a Spark session is already launched and a sparkContext already created):
# C:\Archivos de programa\Google\Cloud SDK>gcloud compute instances start cluster-jjtzapata-m cluster-jjtzapata-w-0 cluster-jjtzapata-w-1 --zone europe-west1-d
#
from pyspark.sql import SparkSession
miSparkSession = SparkSession \
.builder \
.appName("Spark-Outbrain-JJTZ") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
sc = miSparkSession.sparkContext
#SparkSession.builder.master("local[*]").appName("Outbrain-JJTZ2").getOrCreate()
# miSparkSession.stop()
# sc.stop()
#from pyspark import SparkConf, SparkContext
#conf = SparkConf().setMaster("local").setAppName("Outbrain-JJTZ")
#miSparkContext = SparkContext(conf = conf)
#from pyspark.sql.types import StringType
#from pyspark import SQLContext
#sqlContext = SQLContext(miSparkContext)
#
# LOAD THE DATA:
#
s_input_path = "C:/Users/jtoharia/Downloads/Kaggle_Outbrain/"
s_output_path = "C:/Users/jtoharia/Dropbox/AFI_JOSE/Kaggle/Outbrain/"
#f = sc.textFile(s_input_path + "clicks_train_spark.csv") # 87,141,732 records
f = sc.textFile(s_input_path + "clicks_train_debug_spark.csv") # 54,348 records
f = sc.textFile(s_output_path + "clicks_train_debug_spark.csv") # 54,348 records
f = sc.textFile("gs://jjtzapata/clicks_train_debug_spark.csv") # 54,348 records
f = sc.textFile("/home/jjtzapata/clicks_train_debug_spark.csv") # 54,348 records
# # NOTE: To copy files to the gcloud machine (careful: it copies them to a user other than jjtoharia, probably /home/jjtzapata!):
# gcloud compute copy-files "C:\Personal\Dropbox\AFI_JOSE\Kaggle\Outbrain\prueba.libsvm" cluster-jjtzapata-m: --zone europe-west1-d
# # NOTE: To copy to Google Storage gs://jjtzapata
# gsutil cp "C:\Personal\Dropbox\AFI_JOSE\Kaggle\Outbrain\prueba.libsvm" gs://jjtzapata
# gsutil cp "C:\Personal\Dropbox\AFI_JOSE\Kaggle\Outbrain\clicks_train_debug_spark.csv" gs://jjtzapata
# # Google Cloud Dataproc instances (machines, clusters):
# # To see the external IP: gcloud compute instances list
# gcloud compute instances start cluster-jjtzapata-m --zone europe-west1-d
# f.cache()
#f.count() # Takes a long time! (6 min) 87,141,732
#Remove the first line (contains headers)
cabecera = f.first()
f = f.filter(lambda x: x != cabecera).map(lambda lin: lin.replace("\"","").replace("'","").split(","))
#f.count() # Takes a long time! (6 min) 87,141,731
#f.take(1)
campos_enteros = ['display_id', 'ad_document_id', 'document_id', 'ad_id', 'clicked', 'numAds', 'platform', 'hora', 'dia', 'ad_campaign_id', 'ad_advertiser_id', 'source_id', 'publisher_id', 'ad_source_id', 'ad_publisher_id', 'pais_US', 'pais_GB' ,'pais_CA' ,'pais_resto']
campos_string = ['uuid'] # Removed: 'geo_location', 'geo_loc.country', 'pais', 'publish_time', 'ad_publish_time'
# NOTE: 'uuid' was also removed (from clicks_train_debug_spark.csv)
from pyspark.sql.types import StringType, IntegerType, FloatType, StructField, StructType
def mi_estructura(nombre_campo):
if(nombre_campo in campos_enteros):
return(StructField(nombre_campo, IntegerType(), True))
elif(nombre_campo in campos_string):
return(StructField(nombre_campo, StringType(), True))
else:
return(StructField(nombre_campo, FloatType(), True))
campos = [mi_estructura(fld_name) for fld_name in cabecera.split(",")]
estructura = StructType(campos)
# toDF() DOES NOT WORK BECAUSE THE TYPES DO NOT MATCH (?) full_trainset = f.toDF(estructura)
# SO WE READ THE CSV AGAIN, THIS TIME WITH THE STRUCTURE (SCHEMA):
full_trainset = spark.read.csv("gs://jjtzapata/clicks_train_debug_spark.csv", schema = estructura, header = True, mode = "DROPMALFORMED")
#full_trainset.createOrReplaceTempView("full_trainset")
#full_trainset.take(2)
#full_trainset.describe().show()
#
# FIND CORRELATION BETWEEN PREDICTORS AND TARGET:
#
for i in full_trainset.columns:
if not( isinstance(full_trainset.select(i).take(1)[0][0], str) | isinstance(full_trainset.select(i).take(1)[0][0], unicode) ) :
p = full_trainset.stat.corr("clicked",i)
if(p > 0.5):
print( "Correlation to OUTCOME (clicked) for ", i, p)
#
# SELECT THE VARIABLES:
#
from pyspark.ml.linalg import Vectors
def transformToLabeledPoint(row) :
lp = ( row["clicked"], \
Vectors.dense([
row["numAds"], \
row["timestamp"], \
row["topics_prob"], \
row["ad_topics_prob"], \
row["entities_prob"], \
row["ad_entities_prob"], \
row["categories_prob"], \
row["ad_categories_prob"]
]))
return lp
train_lp = full_trainset.rdd.map(transformToLabeledPoint)
#train_lp.collect()[:5]
train_df = spark.createDataFrame(train_lp, ["label", "features"])# miSparkSession.createDataFrame(train_lp, ["label", "features"])
#train_df.select("label","features").show(10)
#
# PCA (PRINCIPAL COMPONENTS):
#
from pyspark.ml.feature import PCA
numComps = 3
bankPCA = PCA(k=numComps, inputCol="features", outputCol="pcaFeatures") # Keep the first 3 principal components
pcaModel = bankPCA.fit(train_df)
pcaResult = pcaModel.transform(train_df).select("label","pcaFeatures")
pcaResult.show(truncate=False)
#### Everything works fine up to here (on Google Cloud Dataproc)!
# To connect to the Google Cloud Dataproc Linux machine:
# - Install the Google Cloud SDK, or better use the web (Google Cloud console), or use KiTTY (a pain to create ssh keys, etc.)
# - open Spark-Python (pyspark)
# - That's it ("miSparkSession" is "spark" and "sc" is "sc")
# To use XGBoost, it has to be installed:
# 1.- in the Linux console: [ssh cluster-jjtzapata-m.europe-west1-d.evident-galaxy-150614]
# git clone --recursive https://github.com/dmlc/xgboost
# cd xgboost/
# make -j4
# sudo apt-get install python-setuptools
# [Install NumPy, SciPy, etc. (TAKES AGES):] sudo apt-get install python-numpy python-scipy python-matplotlib ipython ipython-notebook python-pandas python-sympy python-nose
# cd python-package
# sudo python setup.py install
#
# hadoop fs -copyFromLocal /home/jjtzapata/trainset.libsvm
#
import xgboost as xgb
dtrain = xgb.DMatrix("/home/jjtzapata/trainset.libsvm#dtrain.cache")
# NOTA "#dtrain.cache" es para la versión con caché de disco, para ficheros "GRANDES"...
#dtrain = xgb.DMatrix("hdfs:///trainset.libsvm/#dtrain.cache") # ESTO NO FUCNIONA IS XGBOOST NO ESTÁ COMPILADO CON LA OPCIÓN "HDFS"...
# dtrain = xgb.DMatrix(train_df.select("features"), label = train_df.select("label"))
param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic', 'eval_metric':'map'}
num_round = 20
cv_xr = xgb.cv(param, dtrain, num_boost_round=num_round)
cv_xr
# help(xgb.cv) # to see all the options!
# make prediction
dtest = xgb.DMatrix("/home/jjtzapata/testset.libsvm")
bst = xgb.train(param, dtrain, num_boost_round=num_round)  # NOTE: added - 'bst' was never defined in this scratch code
preds = bst.predict(dtest)
preds[1:10]
# dtrain[1:10,1:3]  # NOTE: a DMatrix cannot be indexed like this; use dtrain.slice([...]) to take rows
xr = xgb.XGBClassifier()  # scikit-learn style interface (the sketch below is unfinished)
# cv_xr = xr.fit(full_trainset, y=full_trainset['clicked'])  # would need a local (pandas/NumPy) matrix, not a Spark DataFrame
# xr.predict(X_test)
#
# H2O:
#
# git clone http://github.com/h2oai/sparkling-water
# cd sparkling-water
# sudo su
# # export SPARK_HOME="/path/to/spark/installation"
# export SPARK_HOME=/usr/lib/spark
# export MASTER='local[*]'
# mkdir -p $(pwd)/private/
# curl -s http://h2o-release.s3.amazonaws.com/h2o/rel-turing/10/Python/h2o-3.10.0.10-py2.py3-none-any.whl > $(pwd)/private/h2o.whl
# export H2O_PYTHON_WHEEL=$(pwd)/private/h2o.whl
# ./gradlew build -x check
# export HADOOP_HOME=/usr/lib/spark
cd sparkling-water
bin/pysparkling
#from operator import add
#wc = f.flatMap(lambda x: x.split(" ")).map(lambda x: (x,1)).reduceByKey(add)
#print(wc.collect())
#f.saveAsTextFile("clicks_train_prueba.csv")
# ********************************************************************************************************************************************
# ********************************************************************************************************************************************
# virtualenv + pyspark + keras + tensorflow: [http://henning.kropponline.de/2016/09/17/running-pyspark-with-virtualenv/]
#
NOTE: The following has to be done on every machine of the SPARK cluster (master and workers):
NOTE: We are at: jjtoharia@cluster-jjtzapata-m:~$ [pwd = /home/jjtoharia] (or on cluster-jjtzapata-w0 or cluster-jjtzapata-w1...)
sudo apt-get install python-pip
sudo pip install virtualenv
virtualenv kaggle
virtualenv --relocatable kaggle
source kaggle/bin/activate
# Not sure if numpy is needed, but I installed it before keras:
pip install numpy
# Show the numpy version:
python -c "import numpy as np; print('Python numpy v. ' + np.version.version)"
pip install keras
pip install tensorflow
# For Windows (64-bit), if pip does not work: [https://www.tensorflow.org/get_started/os_setup#pip_installation_on_windows]
# conda install python=3.5 [to downgrade Anaconda to Python 3.5, until tensorflow for Windows reaches 3.6 or higher]
# # pip install --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-0.12.1-cp35-cp35m-win_amd64.whl
# Verify keras in python:
python -c "import keras"
# change "theano" to "tensorflow" if needed - [Ctrl-X] - [Y]:
# nano .keras/keras.cnf
pip install pandas
pip install sklearn
# To read/save weights in .hdf5 format:
pip install h5py
# To share binary files (dataframes) between R and Python
# https://www.google.es/amp/s/blog.rstudio.org/2016/03/29/feather/amp/
sudo apt-get install python-dev
pip install cython
pip install feather-format
# # another way to install it (e.g. Windows Anaconda3)]
# conda install feather-format -c conda-forge
# elephas (to use keras on SPARK):
# sudo apt-get install python-dev
# takes a while...
pip install elephas
# for elephas (?):
pip install flask
# So that it works with keras v1.xxx:
pip install --upgrade --no-deps git+git://github.com/maxpumperla/elephas
sudo nano /etc/spark/conf/spark-env.sh
# Add at the end of the spark-env.sh file:
if [ -z "${PYSPARK_PYTHON}" ]; then
export PYSPARK_PYTHON=/home/jjtoharia/kaggle/bin/python2.7
fi
NOTE: This way there is no need to activate the virtualenv (with source xxx/bin/activate). Whatever is installed in that location on each machine (master and workers) will be used.
# sudo reboot
# ********************************************************************************************************************************************
# ********************************************************************************************************************************************
# [in cmd]
source kaggle/bin/activate
python
# [in python/pyspark] [NOTE: cluster-1-m is the name of the Spark cluster's master server, formerly cluster-jjtzapata-m]
import time
def timefunc(f):
def f_timer(*args, **kwargs):
start = time.time()
result = f(*args, **kwargs)
end = time.time()
print f.__name__, ': ', '{:,.4f}'.format(end - start), ' segs.'
return result
return f_timer
s_input_path = 'kaggle/Outbrain/In/python/'
@timefunc
def from_feather_to_csv(fich = 'clicks_X_valid_4-1.feather', s_input_path = 'kaggle/Outbrain/In/python/'):
from feather import read_dataframe as fthr_read_dataframe
from numpy import savetxt as np_savetxt
X = fthr_read_dataframe(s_input_path + fich)
fich = fich.replace('.feather', '_para_spark.csv')
    # # Remove the NAs (set them to zero): there should NOT be any... (BUT THERE ARE!!!) (e.g. uuid_pgvw_hora_min)
# X[isnan(X)] = 0
np_savetxt(s_input_path + fich, X, delimiter=',')
print(fich, X.values.shape, ' Ok.')
return(fich)
def from_feather_to_csv_all():
from os.path import isfile as os_path_isfile
for seq_len in range(2,13):
for nF in range(1, 9999): # 1,...,(n-1)
fichtr = 'clicks_X_train_' + str(seq_len) + '-' + str(nF) + '.feather'
if not os_path_isfile(s_input_path + fichtr):
                break # No more files
fich = 'clicks_X_train_' + str(seq_len) + '-' + str(nF) + '.feather'; fich = from_feather_to_csv(fich)
fich = 'clicks_X_valid_' + str(seq_len) + '-' + str(nF) + '.feather'; fich = from_feather_to_csv(fich)
fich = 'clicks_X_test_' + str(seq_len) + '-' + str(nF) + '.feather'; fich = from_feather_to_csv(fich)
fich = 'clicks_y_train_' + str(seq_len) + '-' + str(nF) + '.feather'; fich = from_feather_to_csv(fich)
fich = 'clicks_y_valid_' + str(seq_len) + '-' + str(nF) + '.feather'; fich = from_feather_to_csv(fich)
fich = 'clicks_y_test_' + str(seq_len) + '-' + str(nF) + '.feather'; fich = from_feather_to_csv(fich)
from_feather_to_csv_all()
# [in cmd]
# hadoop fs -copyFromLocal kaggle/Outbrain/In/python/clicks_*_*_*-*.csv
hadoop fs -rm clicks_X_train_4.csv
hadoop fs -appendToFile kaggle/Outbrain/In/python/clicks_X_train_4-*_para_spark.csv clicks_X_train_4.csv
hadoop fs -appendToFile kaggle/Outbrain/In/python/clicks_y_train_4-*_para_spark.csv clicks_y_train_4.csv
hadoop fs -appendToFile kaggle/Outbrain/In/python/clicks_X_valid_4-*_para_spark.csv clicks_X_valid_4.csv
hadoop fs -appendToFile kaggle/Outbrain/In/python/clicks_y_valid_4-*_para_spark.csv clicks_y_valid_4.csv
hadoop fs -appendToFile kaggle/Outbrain/In/python/clicks_X_test_4-*_para_spark.csv clicks_X_test_4.csv
hadoop fs -appendToFile kaggle/Outbrain/In/python/clicks_y_test_4-*_para_spark.csv clicks_y_test_4.csv
hadoop fs -ls
ls -l kaggle/Outbrain/In/python/clicks_X_train_4-*.csv
# [in pyspark] [NOTE: cluster-1-m is the name of the Spark cluster's master server, formerly cluster-jjtzapata-m]
# s_spark_inputpath = 'hdfs://cluster-1-m:8020/user/jjtoharia/'
# from pyspark.sql.types import StructType, StructField
# from pyspark.sql.types import DoubleType, IntegerType, StringType
# schema = StructType([
# StructField("A", IntegerType()),
# StructField("B", DoubleType()),
# StructField("C", StringType())
# ])
# schema = StructType([StructField("A", DoubleType())])
# X = spark.read.csv(s_spark_inputpath + 'clicks_X_valid_4-1_para_spark.csv', header=False, mode="DROPMALFORMED", schema=schema)
# y = spark.read.csv(s_spark_inputpath + 'clicks_y_valid_4-1_para_spark.csv', header=False, mode="DROPMALFORMED", schema=schema)
# X.collect()[5]
s_spark_inputpath = 'hdfs://cluster-1-m:8020/user/jjtoharia/'
# Add the pyspark_csv utility to the Spark context:
sc.addPyFile('kaggle/pyspark_csv.py')
# And import what we need from it:
import pyspark_csv as pycsv
txt_rdd = sc.textFile(s_spark_inputpath + 'clicks_X_valid_4.csv')
txt_rdd.count()
first_rec = txt_rdd.top(1)
first_rec = first_rec[0].split(',')
num_cols = len(first_rec)
from pandas import read_csv
from numpy import float64 as np_float64
X = read_csv(s_input_path + 'clicks_X_valid_4-1_para_spark.csv', dtype=np_float64, header = None)
X2 = read_csv(s_input_path + 'clicks_X_valid_4-2_para_spark.csv', dtype=np_float64, header = None)
y = read_csv(s_input_path + 'clicks_y_valid_4-1_para_spark.csv', dtype=np_float64, header = None)
y2 = read_csv(s_input_path + 'clicks_y_valid_4-2_para_spark.csv', dtype=np_float64, header = None)
from numpy import concatenate as np_concat # To concatenate several files into one (leer_y_reshape)
X = np_concat((X, X2), axis=0)
y = np_concat((y, y2), axis=0)
X.shape, y.shape
num_cols = X.shape[1]
# NOTE: Careful, it gets sorted (by the first column...)
dfX = pycsv.csvToDataFrame(sqlCtx, txt_rdd, columns=['Col_' + str(i) for i in range(0,num_cols)])
txt_rdd = sc.textFile(s_spark_inputpath + 'clicks_y_valid_4.csv')
# NOTE: Careful, it gets sorted (by the first column...)
dfy = pycsv.csvToDataFrame(sqlCtx, txt_rdd, columns=['Clicked'])
dfX.select(['Col_' + str(i) for i in range(0,4)]).show(10)
dfy.select('Clicked').show(10)
# Now these two DataFrames have to be turned into a single RDD of (features, label) pairs, as [rdd = to_simple_rdd(sc, X_train, y_train)] does (see the sketch below)
# PENDING *****
from elephas.utils.rdd_utils import to_simple_rdd
rdd = to_simple_rdd(sc, X_train, Y_train)
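# Hypothetical sketch (added, untested): build an elephas-style RDD of
# (features, label) pairs directly from the two DataFrames above, assuming both
# keep the same row count, order and partitioning (note that pyspark_csv may
# reorder rows, see the NOTE above), instead of collecting everything to the driver.
from numpy import array as np_array
rdd_pairs = dfX.rdd.map(lambda row: np_array(row, dtype='float64')).zip(dfy.rdd.map(lambda row: np_array(row, dtype='float64')))
# rdd_pairs.take(1)  # -> [(array of num_cols features, array([clicked]))]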
[?]
sc.statusTracker().getActiveJobsIds()
sc.statusTracker().getActiveStageIds()
miSparkSession.stop()
sc.stop()
# --------------------------
# # from: https://github.com/maxpumperla/elephas
# from keras.models import Sequential
# from keras.layers.recurrent import LSTM
# from keras.layers.core import Dense, Dropout, Activation
# from keras.optimizers import SGD
# seq_len = 4
# model = Sequential()
# #model.add(Dense(128, input_dim=503))
# #model.add(Activation('relu'))
# model.add(LSTM(input_length=seq_len, input_dim=num_cols, output_dim=lstm_neuronas_ini, dropout_W=dropout_in, dropout_U=dropout_U, return_sequences=(seq_len != 1))) # , activation='relu'))
# #model.add(Dropout(0.2))
# model.add(Dense(128))
# model.add(Activation('relu'))
# model.add(Dropout(0.2))
# model.add(Dense(2)) # It is 2 because of to_categorical()
# model.add(Activation('softmax'))
# model.compile(loss='categorical_crossentropy', optimizer=SGD())
# model.get_weights()
# from pandas import read_csv
# from numpy import float64 as np_float64
# s_input_path = 'kaggle/Outbrain/In/python/'
# X = read_csv(s_input_path + 'clicks_X_valid_4-1_para_spark.csv', dtype=np_float64, header = None)
# X = X.values
# y = read_csv(s_input_path + 'clicks_y_valid_4-1_para_spark.csv', dtype=int, header = None)
# y = y.values
# from keras.utils.np_utils import to_categorical
# X2, y2 = mi_reshape(X, to_categorical(y), seq_len) # Ponemos dos clases (columnas) a y
# X.shape, y.shape, X2.shape, y2.shape
# from elephas.utils.rdd_utils import to_simple_rdd
# rdd = to_simple_rdd(sc, X, y_bin) # y[:,0])
# from elephas.spark_model import SparkModel
# from elephas import optimizers as elephas_optimizers
# adagrad = elephas_optimizers.Adagrad()
# mi_spark_model = SparkModel(sc,model, optimizer=adagrad, frequency='epoch', mode='asynchronous', num_workers=4)
# mi_spark_model.train(rdd, nb_epoch=20, batch_size=batchsize, verbose=0, validation_split=0.1)
# #scores = model.evaluate(X, y_bin, verbose=0, batch_size=batchsize)
# #print('1 - Loss: %.4f%%' % (100-scores[0]*100))
# #probs = model.predict_proba(X_test, batch_size=batchsize)[:,1] # Keep the probabilities of class "1"
# probs = mi_spark_model.predict(X_test)[:,1] # Keep the probabilities of class "1"
# print('1 - Loss: %.4f%%' % (100*(1-log_loss(y_bin[:,1], probs))))
#
# ------------------------------------------------------------
import time
def timefunc(f):
def f_timer(*args, **kwargs):
start = time.time()
result = f(*args, **kwargs)
end = time.time()
print f.__name__, ': ', '{:,.4f}'.format(end - start), ' segs.'
return result
return f_timer
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM
# -> import crear_modelo
@timefunc
def crear_modelo(seq_len, num_capas, num_cols, lstm_neuronas_ini, lstm_neuronas_mid, lstm_neuronas_fin, dropout_in, dropout_U, mi_loss, mi_optimizador, mis_metrics, b_Spark = False):
print('Create the model:')
model = Sequential()
#model.add(Embedding(input_dim=top_words, output_dim=embedding_vector_length, input_length=seq_len))
if(num_capas == 1):
model.add(LSTM(input_length=seq_len, input_dim=num_cols, output_dim=lstm_neuronas_ini, dropout_W=dropout_in, dropout_U=dropout_U, return_sequences=(seq_len != 1))) # , activation='relu'))
else:
model.add(LSTM(input_length=seq_len, input_dim=num_cols, output_dim=lstm_neuronas_ini, dropout_W=dropout_in, dropout_U=dropout_U, return_sequences=True)) # , activation='relu'))
if(num_capas == 2):
model.add(LSTM(output_dim=lstm_neuronas_fin, dropout_W=dropout_in, dropout_U=dropout_U, return_sequences=(seq_len != 1))) # , activation='relu'))
else:
model.add(LSTM(output_dim=lstm_neuronas_mid, dropout_W=dropout_in, dropout_U=dropout_U, return_sequences=True)) # , activation='relu'))
model.add(LSTM(output_dim=lstm_neuronas_fin, dropout_W=dropout_in, dropout_U=dropout_U, return_sequences=(seq_len != 1))) # , activation='relu'))
    # Output layer:
model.add(LSTM(output_dim=(2 if b_Spark else 1), dropout_W=dropout_in, dropout_U=dropout_U, activation='sigmoid', return_sequences=(seq_len != 1)))
model.compile(loss=mi_loss, optimizer=mi_optimizador, metrics=mis_metrics)
print(model.summary())
return(model)
# ########################################################
# FIRST PREPARE THE RDD AND SAVE IT TO HADOOP:
# ########################################################
from keras.utils.np_utils import to_categorical
from numpy import reshape as np_reshape
from numpy import concatenate as np_concat # To concatenate several files into one (leer_y_reshape)
from pandas import read_csv
from numpy import float64 as np_float64
from os.path import isfile as os_path_isfile
def mi_reshape(X, y, seq_len = 1):
if len(X.shape) == 3:
X = np_reshape(X, (int((X.shape[0] * X.shape[1])/seq_len), seq_len, X.shape[2]))
else:
X = np_reshape(X, (int(X.shape[0]/seq_len), seq_len, X.shape[1]))
if not y is None:
if len(y.shape) == 3:
y = np_reshape(y, (int((y.shape[0] * y.shape[1])/seq_len), seq_len, y.shape[2]))
else:
if seq_len != 1:
y = np_reshape(y, (int(y.shape[0]/seq_len), seq_len, y.shape[1]))
print(X.shape, y.shape)
else:
print(X.shape)
return [X, y]
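# Added note - quick shape check for mi_reshape (illustrative only):
#   mi_reshape(X, y, seq_len=4) with X of shape (8, 503) and y of shape (8, 2)
#   returns X of shape (2, 4, 503) and y of shape (2, 4, 2).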
def mi_reshape_probs(probs, seq_len = 1):
if len(probs.shape) == 3:
if seq_len != probs.shape[1]:
            print('NOTE: The Seq_Len dimension of probs does NOT match the seq_len parameter!')
probs = np_reshape(probs, (probs.shape[0] * probs.shape[1], probs.shape[2]))
print(probs.shape)
return(probs)
b_Spark = True
s_input_path = 'kaggle/Outbrain/In/python/'
s_output_path = 'kaggle/Outbrain/Out/python/'
s_spark_inputpath = 'hdfs://cluster-1-m:8020/user/jjtoharia/'
numSparkWorkers = 4
def preparar_RDD(seq_len = 0):
from elephas.utils.rdd_utils import to_simple_rdd
from os import rename as os_rename
for nF in range(1, 99): # 1,...,(n-1)
fichtr = 'clicks_X_train_' + str(seq_len) + '-' + str(nF) + '_para_spark.csv'
if os_path_isfile(s_input_path + fichtr):
            print('Reading train+valid files ' + str(nF) + ' - numAds ' + str(seq_len) + '...')
X_train = read_csv(s_input_path + 'clicks_X_train_' + str(seq_len) + '-' + str(nF) + '_para_spark.csv', dtype=np_float64, header = None).values
y_train = read_csv(s_input_path + 'clicks_y_train_' + str(seq_len) + '-' + str(nF) + '_para_spark.csv', dtype=int, header = None).values
X_valid = read_csv(s_input_path + 'clicks_X_valid_' + str(seq_len) + '-' + str(nF) + '_para_spark.csv', dtype=np_float64, header = None).values
y_valid = read_csv(s_input_path + 'clicks_y_valid_' + str(seq_len) + '-' + str(nF) + '_para_spark.csv', dtype=int, header = None).values
print(X_train.shape, y_train.shape, X_valid.shape, y_valid.shape)
X_train, y_train = mi_reshape(X_train, to_categorical(y_train), seq_len)
X_valid, y_valid = mi_reshape(X_valid, to_categorical(y_valid), seq_len)
            X_train = np_concat((X_train, X_valid), axis=0) # Include the validation set in the training set for Spark
            y_train = np_concat((y_train, y_valid), axis=0) # Include the validation set in the training set for Spark
print(X_train.shape, y_train.shape)
            print('Creating RDD (train+valid) ' + str(nF) + ' - numAds ' + str(seq_len) + '...')
rdd_ini = to_simple_rdd(sc, X_train, y_train)
            # Convert ndarray [ i.e. array(...) ] into list [ i.e. [...] ]:
rdd_lista = rdd_ini.map(lambda i: map(lambda s: s.tolist(), i))
            # And now save it as text:
            rdd_lista.coalesce(numSparkWorkers, True).saveAsTextFile(s_spark_inputpath + 'clicks_train_seq' + str(seq_len) + '-' + str(nF) + '_rdd') # Force saving it in (at least) 4 chunks
            print('OK. RDD (train+valid) ' + str(nF) + ' - numAds ' + str(seq_len) + ' saved to HDFS.')
os_rename(s_input_path + fichtr, s_input_path + 'ok_en_hdfs/' + 'clicks_X_train_' + str(seq_len) + '-' + str(nF) + '_para_spark.csv')
for seq_len in range(2,13):
preparar_RDD(seq_len)
seq_len = 4
dropout_in = 0.3
dropout_U = 0.3
batchsize = 1000
num_capas = 1 # 1, 2 ó 3
lstm_neuronas_ini = 48 # 192
lstm_neuronas_mid = 24 # 48
lstm_neuronas_fin = 12 # 12
mi_early_stop = 10 # if there is no improvement (in val_loss) for N consecutive rounds, fit (training) stops
iteraciones = 2
mi_loss = 'binary_crossentropy' # mi_loss = 'mean_absolute_error'
mi_optimizador = 'adam'
mis_metrics = ['accuracy'] # mis_metrics = ['precision']
# ########################################################
# # READ THE (rdd) DATA ALREADY PREPARED FROM HADOOP:
# ########################################################
rdd_train_txt = sc.textFile(s_spark_inputpath + 'clicks_train_seq' + str(seq_len) + '-1_rdd')
from numpy import array as np_array
rdd_train_ok = rdd_train_txt.map(lambda s: eval(s)).map(lambda j: map(lambda s: np_array(s), j))
print(rdd_train_ok.getNumPartitions()) # Should return numSparkWorkers == 4 (or more)
# Get the number of columns (num_cols) and the sequence length (seq_len) from the RDD:
primer_reg = rdd_train_ok.take(1)
seq_len = len(primer_reg[0][0]) # 4
num_cols = len(primer_reg[0][0][0]) # = 503
num_reg_train = rdd_train_ok.count()
print('seq_len = ', seq_len, 'num_cols = ', num_cols)
# ########################################################
# READ THE TEST DATA (ndarray) TO EVALUATE:
# ########################################################
X_test = read_csv(s_input_path + 'clicks_X_test_' + str(seq_len) + '-1_para_spark.csv', dtype=np_float64, header = None).values
y_test = read_csv(s_input_path + 'clicks_y_test_' + str(seq_len) + '-1_para_spark.csv', dtype=int, header = None).values
print(X_test.shape, y_test.shape)
X3_test, y3_test = mi_reshape(X_test, to_categorical(y_test), seq_len)
print(X3_test.shape, y3_test.shape)
# ########################################################
model=crear_modelo(seq_len, num_capas, num_cols, lstm_neuronas_ini, lstm_neuronas_mid, lstm_neuronas_fin, dropout_in, dropout_U, mi_loss, mi_optimizador, mis_metrics, b_Spark)
# ########################################################
from elephas.spark_model import SparkModel
from elephas import optimizers as elephas_optimizers
adagrad = elephas_optimizers.Adagrad()
mi_spark_model = SparkModel(sc,model, optimizer=adagrad, frequency='epoch', mode='asynchronous', num_workers=numSparkWorkers)
# ########################################################
print(' =============== TRAINING... ================= ')
# ########################################################
from sklearn.metrics import log_loss
@timefunc
def entrenar_spark(mi_spark_model, rdd_train_ok, iteraciones, batchsize, verbose=0, validation_split=0.1):
mi_spark_model.train(rdd_train_ok, nb_epoch=iteraciones, batch_size=batchsize, verbose=verbose, validation_split=validation_split)
@timefunc
def evaluar_spark(mi_spark_model, X3_test, y_test):
seq_len = X_test.shape[1]
#scores = model.evaluate(X3_test, y3_test, verbose=0, batch_size=batchsize)
#print('1 - Loss: %.2f%%' % (100-scores[0]*100))
    #probs = model.predict_proba(X3_test, batch_size=batchsize)[:,1] # Keep the probabilities of class "1"
probs = mi_spark_model.predict(X3_test)
print(probs.shape)
    # probs = mi_reshape_probs(probs, seq_len)[:,1:] # Keep the probabilities of class "1" (for some reason they come out as zero... ???)
print('1 - Loss: %.4f%%' % (100*(1-log_loss(y_test, mi_reshape_probs(probs, seq_len)[:,1:]))))
return(probs)
# ########################################################
print(' =============== TRAINING... ================= ')
# ########################################################
iteraciones = 5
entrenar_spark(mi_spark_model, rdd_train_ok, iteraciones, batchsize, verbose = 1, validation_split = 0.2)
# ########################################################
print(' =============== EVALUATING... ================= ')
# ########################################################
probs = evaluar_spark(mi_spark_model, X3_test, y_test)
probs[0:2]
# ########################################################
print(' ======== SAVING PREDS AND MODEL... =========')
# ########################################################
from numpy import savetxt as np_savetxt
def descr_modelo(model, num_reg_train, batchsize, tipo_descr = 1):
model_conf_capa_1 = model.get_config()[0]['config']
num_cols = model_conf_capa_1['input_dim']
dropout_in = model_conf_capa_1['dropout_W']
dropout_U = model_conf_capa_1['dropout_U']
num_capas = len(model.get_config()) - 1
seq_len = model_conf_capa_1['input_length']
lstm_neuronas_ini = model_conf_capa_1['output_dim']
lstm_neuronas_mid = 0
lstm_neuronas_fin = 0
if(num_capas > 1):
lstm_neuronas_fin = model.get_config()[1]['config']['output_dim']
if(num_capas > 2):
lstm_neuronas_mid = lstm_neuronas_fin
lstm_neuronas_fin = model.get_config()[2]['config']['output_dim']
if tipo_descr == 1:
descr = 'bch-' + str(batchsize) + '_dri-' + str(dropout_in) + '_dru-' + str(dropout_U) + '_reg-' + str(num_reg_train) + '_col-' + str(num_cols)
descr = descr + '_ini-' + str(lstm_neuronas_ini) + '_mid-' + str(lstm_neuronas_mid) + '_fin-' + str(lstm_neuronas_fin)
descr = descr + '_seq-' + str(seq_len)
else: # if tipo_descr == 2:
descr = '(BatchSize = ' + str(batchsize) + ')' + '. (Dropout_in = ' + str(dropout_in) + '. Dropout_U = ' + str(dropout_U) + ')'
descr = descr + '. (SeqLen = ' + str(seq_len) + ')'
descr = descr + ' - (Nodos = ' + str(lstm_neuronas_ini)
descr = descr + ( ',' + str(lstm_neuronas_mid) if num_capas == 3 else '')
descr = descr + ( ',' + str(lstm_neuronas_fin) if num_capas >= 2 else '') + ')'
descr = descr + ' - ' + str(num_reg_train) + ' regs/' + str(num_cols) + ' cols'
return(descr)
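# Added illustrative note: with the settings defined earlier in this script
# (batchsize=1000, dropout 0.3, one LSTM layer of 48 units, seq_len=4, 503 columns),
# tipo_descr=1 produces something like
# 'bch-1000_dri-0.3_dru-0.3_reg-<num_reg_train>_col-503_ini-48_mid-0_fin-0_seq-4'.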
def guardar_modelo_json(model, pre_post, batchsize):
fichname = descr_modelo(model, num_reg_train, batchsize)
fichname = 'modelo' + pre_post + '_' + fichname + '.json'
    print('Saving model (json) [' + 'python/' + fichname + ']...')
with open(s_output_path + fichname, 'w') as json_file:
json_file.write(model.to_json())
@timefunc
def guardar_preds(model, X_test, indice = 0, b_csv = True, numAds = 0, numAdsFich = 0, num_reg_train = 0, num_cols = 0, iteraciones = 0, batchsize = 0, dropout_in = 0, dropout_U = 0):
mi_X_train_shape = [num_reg_train, num_cols]
str_fich = '_debug' if numAds == 0 else '_{n}-{m}'.format(n=numAds,m=numAdsFich)
str_fich = 'test_probs' + str_fich + ('' if indice == 0 else '_' + str(indice))
    print('Saving results (probs) to ' + 'python/' + str_fich + ('.csv' if b_csv else '.feather') + '...')
    #probs = model.predict_proba(X_test, batch_size=batchsize)
    probs = mi_spark_model.predict(X_test)  # NOTE: fixed to use the X_test parameter (the original used the global X3_test)
    probs = mi_reshape_probs(probs, seq_len)[:,1:] # Back to two dimensions (keep the probabilities of class "1")
# print(probs[1:10])
if b_csv:
np_savetxt(s_input_path + str_fich + '.csv', probs, delimiter=',')
    else:
        from pandas import DataFrame  # NOTE: added - this import was missing in the original
        from feather import write_dataframe as fthr_write_dataframe  # NOTE: added - this import was missing in the original
        fthr_write_dataframe(DataFrame(probs), s_input_path + str_fich + '.feather')
print('\nOk. [' + 'In/python/' + str_fich + ('.csv' if b_csv else '.feather') + ']')
np_savetxt(s_output_path + str_fich + '_' + str(iteraciones) + '_' + str(batchsize) + '-' + str(num_reg_train) + '.log', mi_X_train_shape, delimiter=',')
print('Ok. [' + 'python/' + str_fich + '_' + str(iteraciones) + '_' + str(batchsize) + '-' + str(num_reg_train) + '.log]')
    guardar_modelo_json(model, 'post', batchsize) # Save the model structure at the end as well.
print('\nOk. (Iter = ' + str(iteraciones) + '. BatchSize = ' + str(batchsize) + ')' + '. (Dropout_in = ' + str(dropout_in) + '. Dropout_U = ' + str(dropout_U) + ') - ' + str(num_reg_train) + ' regs/' + str(num_cols) + ' cols')
guardar_preds(model, X3_test, 0, True, seq_len, 0, num_reg_train, num_cols, iteraciones, batchsize, dropout_in, dropout_U)
| mit |
larsmans/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example uses an Elastic-Net regression model, and the performance is
measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0 # only the first 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
pxsdirac/tushare | tushare/datayes/IV.py | 10 | 3423 | # -*- coding:utf-8 -*-
"""
DataYes (Tonglian Data) API
Created on 2015/10/12
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class IV():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def DerIv(self, beginDate='', endDate='', optID='', SecID='', field=''):
"""
        Raw implied volatility data, including option price, cumulative trading volume, open interest, implied volatility, etc.
"""
code, result = self.client.getData(vs.DERIV%(beginDate, endDate, optID, SecID, field))
return _ret_data(code, result)
def DerIvHv(self, beginDate='', endDate='', SecID='', period='', field=''):
"""
        Historical volatility: close-to-close historical volatility over the various periods.
"""
code, result = self.client.getData(vs.DERIVHV%(beginDate, endDate, SecID, period, field))
return _ret_data(code, result)
def DerIvIndex(self, beginDate='', endDate='', SecID='', period='', field=''):
"""
        Implied volatility index: the main measure of the average volatility of at-the-money options expiring in 30 to 1080 days.
"""
code, result = self.client.getData(vs.DERIVINDEX%(beginDate, endDate, SecID, period, field))
return _ret_data(code, result)
def DerIvIvpDelta(self, beginDate='', endDate='', SecID='', delta='', period='', field=''):
"""
        Implied volatility surface (based on parameterized smoothing curves), standardized by delta (0.1 to 0.9 in steps of 0.05) and expiry (1 month to 3 years).
"""
code, result = self.client.getData(vs.DERIVIVPDELTA%(beginDate, endDate, SecID, delta, period, field))
return _ret_data(code, result)
def DerIvParam(self, beginDate='', endDate='', SecID='', expDate='', field=''):
"""
        Parameterized implied volatility surface: the surface obtained by smoothing a second-order volatility curve at each expiry (a, b, c curve coefficients).
"""
code, result = self.client.getData(vs.DERIVPARAM%(beginDate, endDate, SecID, expDate, field))
return _ret_data(code, result)
def DerIvRawDelta(self, beginDate='', endDate='', SecID='', delta='', period='', field=''):
"""
        Implied volatility surface (based on raw implied volatility), standardized by delta (0.1 to 0.9 in steps of 0.05) and expiry (1 month to 3 years).
"""
code, result = self.client.getData(vs.DERIVRAWDELTA%(beginDate, endDate, SecID, delta, period, field))
return _ret_data(code, result)
def DerIvSurface(self, beginDate='', endDate='', SecID='', contractType='', field=''):
"""
        Implied volatility surface (by moneyness), standardized by moneyness. Strike range from -60% to +60% in 5% steps; expiries from 1 month to 3 years.
"""
code, result = self.client.getData(vs.DERIVSURFACE%(beginDate, endDate, SecID, contractType, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
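# Hypothetical usage sketch (added; not part of the original module). It assumes a
# valid DataYes token has already been stored via tushare (see up.get_token()), and
# the SecID below is only an illustrative placeholder:
#   iv = IV()
#   df_iv = iv.DerIv(beginDate='20150101', endDate='20150131', SecID='XXXXXX.XSHG')
#   if df_iv is not None:
#       print(df_iv.head())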
| bsd-3-clause |
Adai0808/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
architecture-building-systems/CEAforArcGIS | cea/technologies/solar/photovoltaic_thermal.py | 2 | 42510 | """
Photovoltaic thermal panels
"""
import os
import time
from itertools import repeat
from math import *
import geopandas as gpd
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame as gdf
from numba import jit
import cea.inputlocator
import cea.utilities.parallel
import cea.utilities.workerstream
from cea.constants import HOURS_IN_YEAR
from cea.technologies.solar import constants
from cea.technologies.solar.photovoltaic import (calc_properties_PV_db, calc_PV_power, calc_diffuseground_comp,
calc_absorbed_radiation_PV, calc_cell_temperature)
from cea.technologies.solar.solar_collector import (calc_properties_SC_db, calc_IAM_beam_SC, calc_q_rad, calc_q_gain,
vectorize_calc_Eaux_SC, calc_optimal_mass_flow,
calc_optimal_mass_flow_2, calc_qloss_network)
from cea.utilities import epwreader
from cea.utilities import solar_equations
from cea.utilities.standardize_coordinates import get_lat_lon_projected_shapefile
from cea.analysis.costs.equations import calc_capex_annualized, calc_opex_annualized
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca, Shanshan Hsieh"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
def calc_PVT(locator, config, latitude, longitude, weather_data, date_local, building_name):
"""
This function first determines the surface area with sufficient solar radiation, and then calculates the optimal
tilt angles of panels at each surface location. The panels are categorized into groups by their surface azimuths,
tilt angles, and global irradiation. In the last, electricity and heat generation from PVT panels of each group are calculated.
:param locator: An InputLocator to locate input files
:type locator: cea.inputlocator.InputLocator
    :param radiation_json_path: path to solar insolation data on all surfaces of each building
    :type radiation_json_path: string
    :param metadata_csv_path: path to data of sensor points measuring solar insolation of each building
:type metadata_csv_path: string
:param latitude: latitude of the case study location
:type latitude: float
:param longitude: longitude of the case study location
:type longitude: float
:param weather_path: path to the weather data file of the case study location
:type weather_path: .epw
:param building_name: list of building names in the case study
:type building_name: Series
:param T_in: inlet temperature to the solar collectors [C]
:return: Building_PVT.csv with solar collectors heat generation potential of each building, Building_PVT_sensors.csv
with sensor data of each PVT panel.
"""
t0 = time.perf_counter()
radiation_json_path = locator.get_radiation_building_sensors(building_name)
metadata_csv_path = locator.get_radiation_metadata(building_name)
# solar properties
solar_properties = solar_equations.calc_sun_properties(latitude, longitude, weather_data, date_local, config)
print('calculating solar properties done for building %s' % building_name)
# get properties of the panel to evaluate # TODO: find a PVT module reference
panel_properties_PV = calc_properties_PV_db(locator.get_database_conversion_systems(), config)
panel_properties_SC = calc_properties_SC_db(locator.get_database_conversion_systems(), config)
print('gathering properties of PVT collector panel for building %s' % building_name)
# select sensor point with sufficient solar radiation
max_annual_radiation, annual_radiation_threshold, sensors_rad_clean, sensors_metadata_clean = \
solar_equations.filter_low_potential(radiation_json_path, metadata_csv_path, config)
print('filtering low potential sensor points done for building %s' % building_name)
# Calculate the heights of all buildings for length of vertical pipes
tot_bui_height_m = gpd.read_file(locator.get_zone_geometry())['height_ag'].sum()
# set the maximum roof coverage
if config.solar.custom_roof_coverage:
max_roof_coverage = config.solar.max_roof_coverage
else:
max_roof_coverage = 1.0
if not sensors_metadata_clean.empty:
if not config.solar.custom_tilt_angle:
# calculate optimal angle and tilt for panels according to PV module size
sensors_metadata_cat = solar_equations.optimal_angle_and_tilt(sensors_metadata_clean, latitude,
solar_properties,
max_annual_radiation, panel_properties_PV,
max_roof_coverage)
            print('calculating optimal tilt angle and separation done for building %s' % building_name)
else:
# calculate spacing required by user-supplied tilt angle for panels
sensors_metadata_cat = solar_equations.calc_spacing_custom_angle(sensors_metadata_clean, solar_properties,
max_annual_radiation, panel_properties_PV,
config.solar.panel_tilt_angle,
max_roof_coverage)
print('calculating separation for custom tilt angle done')
# group the sensors with the same tilt, surface azimuth, and total radiation
sensor_groups = solar_equations.calc_groups(sensors_rad_clean, sensors_metadata_cat)
print('generating groups of sensor points done for building %s' % building_name)
Final = calc_PVT_generation(sensor_groups, weather_data, date_local, solar_properties, latitude,
tot_bui_height_m, panel_properties_SC, panel_properties_PV, config)
Final.to_csv(locator.PVT_results(building=building_name), index=True, float_format='%.2f', na_rep='nan')
sensors_metadata_cat.to_csv(locator.PVT_metadata_results(building=building_name), index=True,
index_label='SURFACE',
float_format='%.2f', na_rep='nan') # print selected metadata of the selected sensors
print('Building', building_name, 'done - time elapsed:', (time.perf_counter() - t0), ' seconds')
else: # This block is activated when a building has not sufficient solar potential
Final = pd.DataFrame(
{'Date': date_local, 'PVT_walls_north_E_kWh': 0.0, 'PVT_walls_north_m2': 0.0, 'PVT_walls_north_Q_kWh': 0.0,
'PVT_walls_north_Tout_C': 0.0,
'PVT_walls_south_E_kWh': 0.0, 'PVT_walls_south_m2': 0, 'PVT_walls_south_Q_kWh': 0.0,
'PVT_walls_south_Tout_C': 0.0,
'PVT_walls_east_E_kWh': 0.0, 'PVT_walls_east_m2': 0.0, 'PVT_walls_east_Q_kWh': 0.0,
'PVT_walls_east_Tout_C': 0.0,
'PVT_walls_west_E_kWh': 0.0, 'PVT_walls_west_m2': 0.0, 'PVT_walls_west_Q_kWh': 0.0,
'PVT_walls_west_Tout_C': 0.0,
'PVT_roofs_top_E_kWh': 0.0, 'PVT_roofs_top_m2': 0.0, 'PVT_roofs_top_Q_kWh': 0.0,
'PVT_roofs_top_Tout_C': 0.0,
'Q_PVT_gen_kWh': 0.0, 'T_PVT_sup_C': 0.0, 'T_PVT_re_C': 0.0,
'mcp_PVT_kWperC': 0.0, 'Eaux_PVT_kWh': 0.0,
'Q_PVT_l_kWh': 0.0, 'E_PVT_gen_kWh': 0.0, 'Area_PVT_m2': 0.0,
'radiation_kWh': 0.0}, index=range(HOURS_IN_YEAR))
Final.to_csv(locator.PVT_results(building=building_name), index=False, float_format='%.2f', na_rep='nan')
sensors_metadata_cat = pd.DataFrame(
{'SURFACE': 0, 'AREA_m2': 0, 'BUILDING': 0, 'TYPE': 0, 'Xcoor': 0, 'Xdir': 0, 'Ycoor': 0, 'Ydir': 0,
'Zcoor': 0, 'Zdir': 0, 'orientation': 0, 'total_rad_Whm2': 0, 'tilt_deg': 0, 'B_deg': 0,
'array_spacing_m': 0, 'surface_azimuth_deg': 0, 'area_installed_module_m2': 0,
'CATteta_z': 0, 'CATB': 0, 'CATGB': 0, 'type_orientation': 0}, index=range(2))
sensors_metadata_cat.to_csv(locator.PVT_metadata_results(building=building_name), index=False,
float_format='%.2f', na_rep='nan')
return
def calc_PVT_generation(sensor_groups, weather_data, date_local, solar_properties, latitude, tot_bui_height_m,
panel_properties_SC, panel_properties_PV, config):
"""
To calculate the heat and electricity generated from PVT panels.
:param sensor_groups: properties of sensors in each group
:type sensor_groups: dict
:param weather_data: weather data read from .epw
:type weather_data: dataframe
:param solar_properties:
:param latitude: latitude of the case study location
:param tot_bui_height_m: total height of all buildings [m]
:param panel_properties_SC: properties of solar thermal collectors
:param panel_properties_PV: properties of photovoltaic panels
:param config: user settings from cea.config
:return:
"""
# read variables
number_groups = sensor_groups['number_groups'] # number of groups of sensor points
prop_observers = sensor_groups['prop_observers'] # mean values of sensor properties of each group of sensors
hourly_radiation_Wperm2 = sensor_groups[
'hourlydata_groups'] # mean hourly radiation of sensors in each group [Wh/m2]
T_in_C = get_t_in_pvt(config)
# convert degree to radians
lat_rad = radians(latitude)
g_rad = np.radians(solar_properties.g)
ha_rad = np.radians(solar_properties.ha)
Sz_rad = np.radians(solar_properties.Sz)
# calculate equivalent length of pipes
total_area_module_m2 = prop_observers['area_installed_module_m2'].sum() # total area for panel installation
total_pipe_lengths = calc_pipe_equivalent_length(panel_properties_PV, panel_properties_SC, tot_bui_height_m,
total_area_module_m2)
# empty lists to store results
list_groups_area = [0 for i in range(number_groups)]
total_el_output_PV_kWh = [0 for i in range(number_groups)]
total_radiation_kWh = [0 for i in range(number_groups)]
total_mcp_kWperC = [0 for i in range(number_groups)]
total_qloss_kWh = [0 for i in range(number_groups)]
total_aux_el_kWh = [0 for i in range(number_groups)]
total_Qh_output_kWh = [0 for i in range(number_groups)]
list_results_from_PVT = list(range(number_groups))
potential = pd.DataFrame(index=range(HOURS_IN_YEAR))
panel_orientations = ['walls_south', 'walls_north', 'roofs_top', 'walls_east', 'walls_west']
for panel_orientation in panel_orientations:
potential['PVT_' + panel_orientation + '_Q_kWh'] = 0.0
potential['PVT_' + panel_orientation + '_E_kWh'] = 0.0
potential['PVT_' + panel_orientation + '_m2'] = 0.0
    # assign default number of subdivisions for the calculation
    if panel_properties_SC['type'] == 'ET': # ET: evacuated tubes
        panel_properties_SC['Nseg'] = 100 # default number of subdivisions for the calculation
else:
panel_properties_SC['Nseg'] = 10
for group in range(number_groups):
# read panel properties of each group
teta_z_deg = prop_observers.loc[group, 'surface_azimuth_deg']
module_area_per_group_m2 = prop_observers.loc[group, 'area_installed_module_m2']
tilt_angle_deg = prop_observers.loc[group, 'B_deg'] # tilt angle of panels
# degree to radians
tilt_rad = radians(tilt_angle_deg) # tilt angle
teta_z_rad = radians(teta_z_deg) # surface azimuth
# calculate radiation types (direct/diffuse) in group
radiation_Wperm2 = solar_equations.cal_radiation_type(group, hourly_radiation_Wperm2, weather_data)
## calculate absorbed solar irradiation on tilt surfaces
        # calculate the effective incident angles needed
teta_rad = np.vectorize(solar_equations.calc_angle_of_incidence)(g_rad, lat_rad, ha_rad, tilt_rad, teta_z_rad)
teta_ed_rad, teta_eg_rad = calc_diffuseground_comp(tilt_rad)
# absorbed radiation and Tcell
absorbed_radiation_PV_Wperm2 = np.vectorize(calc_absorbed_radiation_PV)(radiation_Wperm2.I_sol,
radiation_Wperm2.I_direct,
radiation_Wperm2.I_diffuse, tilt_rad,
Sz_rad, teta_rad, teta_ed_rad,
teta_eg_rad, panel_properties_PV)
T_cell_C = np.vectorize(calc_cell_temperature)(absorbed_radiation_PV_Wperm2, weather_data.drybulb_C,
panel_properties_PV)
## SC heat generation
# calculate incidence angle modifier for beam radiation
IAM_b = calc_IAM_beam_SC(solar_properties, teta_z_deg, tilt_angle_deg, panel_properties_SC['type'], latitude)
list_results_from_PVT[group] = calc_PVT_module(config, radiation_Wperm2, panel_properties_SC,
panel_properties_PV,
weather_data.drybulb_C, IAM_b, tilt_angle_deg,
total_pipe_lengths,
absorbed_radiation_PV_Wperm2, T_cell_C, module_area_per_group_m2)
# calculate results from each group
panel_orientation = prop_observers.loc[group, 'type_orientation']
number_modules_per_group = module_area_per_group_m2 / (panel_properties_PV['module_length_m'] ** 2)
PVT_Q_kWh = list_results_from_PVT[group][1] * number_modules_per_group
PVT_E_kWh = list_results_from_PVT[group][6]
# write results
potential['PVT_' + panel_orientation + '_Q_kWh'] = potential['PVT_' + panel_orientation + '_Q_kWh'] + PVT_Q_kWh
potential['PVT_' + panel_orientation + '_E_kWh'] = potential['PVT_' + panel_orientation + '_E_kWh'] + PVT_E_kWh
potential['PVT_' + panel_orientation + '_m2'] = potential[
'PVT_' + panel_orientation + '_m2'] + module_area_per_group_m2
# aggregate results from all modules
list_groups_area[group] = module_area_per_group_m2
total_mcp_kWperC[group] = list_results_from_PVT[group][5] * number_modules_per_group
total_qloss_kWh[group] = list_results_from_PVT[group][0] * number_modules_per_group
total_aux_el_kWh[group] = list_results_from_PVT[group][2] * number_modules_per_group
total_Qh_output_kWh[group] = list_results_from_PVT[group][1] * number_modules_per_group
total_el_output_PV_kWh[group] = list_results_from_PVT[group][6]
total_radiation_kWh[group] = hourly_radiation_Wperm2[group] * module_area_per_group_m2 / 1000
potential['Area_PVT_m2'] = sum(list_groups_area)
potential['radiation_kWh'] = sum(total_radiation_kWh).values
potential['E_PVT_gen_kWh'] = sum(total_el_output_PV_kWh)
potential['Q_PVT_gen_kWh'] = sum(total_Qh_output_kWh)
potential['mcp_PVT_kWperC'] = sum(total_mcp_kWperC)
potential['Eaux_PVT_kWh'] = sum(total_aux_el_kWh)
potential['Q_PVT_l_kWh'] = sum(total_qloss_kWh)
potential['T_PVT_sup_C'] = np.zeros(HOURS_IN_YEAR) + T_in_C
T_out_C = (potential['Q_PVT_gen_kWh'] / potential['mcp_PVT_kWperC']) + T_in_C
potential['T_PVT_re_C'] = T_out_C if T_out_C is not np.nan else np.nan # assume parallel connections for all panels
potential['Date'] = date_local
potential = potential.set_index('Date')
return potential
def calc_pipe_equivalent_length(panel_properties_PV, panel_properties_SC, tot_bui_height_m, total_area_module_m2):
# local variables
lv = panel_properties_PV['module_length_m'] # module length
total_area_aperture = total_area_module_m2 * panel_properties_SC[
'aperture_area_ratio']
number_modules = round(
total_area_module_m2 / (panel_properties_PV['module_length_m'] ** 2)) # this is an estimation
# main calculation
l_ext_mperm2 = (2 * lv * number_modules / total_area_aperture) # pipe length within the collectors
l_int_mperm2 = 2 * tot_bui_height_m / total_area_aperture # pipe length from building substation to roof top collectors
Leq_mperm2 = l_int_mperm2 + l_ext_mperm2 # in m/m2 aperture
pipe_equivalent_lengths_mperm2 = {'Leq_mperm2': Leq_mperm2, 'l_ext_mperm2': l_ext_mperm2,
'l_int_mperm2': l_int_mperm2}
return pipe_equivalent_lengths_mperm2
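# Added illustrative check (made-up numbers): for 100 modules with 1.6 m side
# length (total module area 256 m2), aperture area ratio 0.9 (A_aperture = 230.4 m2)
# and 60 m of total building height:
#   l_ext = 2 * 1.6 * 100 / 230.4 ~= 1.39 m/m2 aperture
#   l_int = 2 * 60 / 230.4        ~= 0.52 m/m2 aperture
#   Leq   = l_ext + l_int         ~= 1.91 m/m2 aperture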
def get_t_in_pvt(config):
if config.solar.t_in_pvt is not None:
Tin_C = config.solar.T_in_PVT
else:
Tin_C = constants.T_IN_PVT
return Tin_C
def calc_PVT_module(config, radiation_Wperm2, panel_properties_SC, panel_properties_PV, Tamb_vector_C, IAM_b,
tilt_angle_deg, pipe_lengths, absorbed_radiation_PV_Wperm2, Tcell_PV_C, module_area_per_group_m2):
"""
This function calculates the heat & electricity production from PVT collectors.
The heat production calculation is adapted from calc_SC_module and then the updated cell temperature is used to
calculate PV electricity production.
:param tilt_angle_deg: solar panel tilt angle [rad]
:param IAM_b_vector: incident angle modifier for beam radiation [-]
:param I_direct_vector: direct radiation [W/m2]
:param I_diffuse_vector: diffuse radiation [W/m2]
:param Tamb_vector_C: dry bulb temperature [C]
:param IAM_d_vector: incident angle modifier for diffuse radiation [-]
    :param Leq: equivalent length of pipes per aperture area [m/m2 aperture]
    :param Le: equivalent length of collector pipes per aperture area [m/m2 aperture]
:param absorbed_radiation_PV_Wperm2: absorbed solar radiation of PV module [Wh/m2]
:param Tcell_PV_C: PV cell temperature [C]
:param module_area_per_group_m2: PV module area [m2]
:return:
..[J. Allan et al., 2015] J. Allan, Z. Dehouche, S. Stankovic, L. Mauricette. "Performance testing of thermal and
photovoltaic thermal solar collectors." Energy Science & Engineering 2015; 3(4): 310-326
"""
# read variables
Tin_C = get_t_in_pvt(config)
n0 = panel_properties_SC['n0'] # zero loss efficiency at normal incidence [-]
c1 = panel_properties_SC[
'c1'] # collector heat loss coefficient at zero temperature difference and wind speed [W/m2K]
c2 = panel_properties_SC['c2'] # temperature difference dependency of the heat loss coefficient [W/m2K2]
mB0_r = panel_properties_SC['mB0_r'] # nominal flow rate per aperture area [kg/h/m2 aperture]
mB_max_r = panel_properties_SC['mB_max_r'] # maximum flow rate per aperture area
mB_min_r = panel_properties_SC['mB_min_r'] # minimum flow rate per aperture area
C_eff_Jperm2K = panel_properties_SC['C_eff'] # thermal capacitance of module [J/m2K]
IAM_d = panel_properties_SC['IAM_d'] # incident angle modifier for diffuse radiation [-]
dP1 = panel_properties_SC['dP1'] # pressure drop [Pa/m2] at zero flow rate
dP2 = panel_properties_SC['dP2'] # pressure drop [Pa/m2] at nominal flow rate (mB0)
dP3 = panel_properties_SC['dP3'] # pressure drop [Pa/m2] at maximum flow rate (mB_max)
dP4 = panel_properties_SC['dP4'] # pressure drop [Pa/m2] at minimum flow rate (mB_min)
Cp_fluid_JperkgK = panel_properties_SC['Cp_fluid'] # J/kgK
aperture_area_ratio = panel_properties_SC['aperture_area_ratio']  # aperture area ratio [-]
area_pv_module = panel_properties_PV['module_length_m'] ** 2
Nseg = panel_properties_SC['Nseg']
T_max_C = panel_properties_SC['t_max']
eff_nom = panel_properties_PV['PV_n']
Bref = panel_properties_PV['PV_Bref']
misc_losses = panel_properties_PV['misc_losses']
aperture_area_m2 = aperture_area_ratio * area_pv_module  # aperture area of each module [m2]
msc_max_kgpers = mB_max_r * aperture_area_m2 / 3600 # maximum mass flow [kg/s]
# Do the calculation of every time step for every possible flow condition
# get states where highly performing values are obtained.
specific_flows_kgpers = [np.zeros(HOURS_IN_YEAR), (np.zeros(HOURS_IN_YEAR) + mB0_r) * aperture_area_m2 / 3600,
(np.zeros(HOURS_IN_YEAR) + mB_max_r) * aperture_area_m2 / 3600,
(np.zeros(HOURS_IN_YEAR) + mB_min_r) * aperture_area_m2 / 3600, np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR)] # in kg/s
specific_pressure_losses_Pa = [np.zeros(HOURS_IN_YEAR), (np.zeros(HOURS_IN_YEAR) + dP2) * aperture_area_m2,
(np.zeros(HOURS_IN_YEAR) + dP3) * aperture_area_m2,
(np.zeros(HOURS_IN_YEAR) + dP4) * aperture_area_m2, np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR)] # in Pa
# generate empty lists to store results
temperature_out = [np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR)]
temperature_in = [np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR)]
supply_out_kW = [np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR)]
supply_losses_kW = [np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR)]
auxiliary_electricity_kW = [np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR)]
temperature_mean = [np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR),
np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR), np.zeros(HOURS_IN_YEAR)]
mcp_kWperK = np.zeros(HOURS_IN_YEAR)
T_module_C = np.zeros(HOURS_IN_YEAR)
# calculate absorbed radiation
tilt_rad = radians(tilt_angle_deg)
q_rad_vector = np.vectorize(calc_q_rad)(n0, IAM_b, IAM_d, radiation_Wperm2.I_direct, radiation_Wperm2.I_diffuse,
tilt_rad) # absorbed solar radiation in W/m2 is a mean of the group
counter = 0
Flag = False
Flag2 = False
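# The six flow scenarios evaluated in the loop below are: (0) zero flow, (1) nominal flow mB0_r,
# (2) maximum flow mB_max_r, (3) minimum flow mB_min_r, (4) an optimal flow derived from
# scenarios 0-3 via calc_optimal_mass_flow, and (5) the optimal flow with hours of negative
# load set to zero via calc_optimal_mass_flow_2.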
for flow in range(6):
Mo_seg = 1 # mode of segmented heat loss calculation. only one mode is implemented.
TIME0 = 0
DELT = 1 # timestep 1 hour
delts = DELT * 3600 # convert time step in seconds
Tfl = np.zeros(3) # create vector to store value at previous [1] and present [2] time-steps
DT = np.zeros(3)
Tabs = np.zeros(3)
STORED = np.zeros(600)
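# STORED layout: indices 100+Iseg / 300+Iseg hold the fluid / absorber temperature of each
# segment at the previous time step, while 200+Iseg / 400+Iseg hold the values of the current
# time step; calc_Mfl_kgpers copies the current values into the previous slots at each new step.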
TflA = np.zeros(600)
TflB = np.zeros(600)
TabsB = np.zeros(600)
TabsA = np.zeros(600)
q_gain_Seg = np.zeros(101) # maximum Iseg = maximum Nseg + 1 = 101
for time in range(HOURS_IN_YEAR):
# c1_pvt = c1 - eff_nom * Bref * absorbed_radiation_PV_Wperm2[time] #todo: to delete
c1_pvt = calc_cl_pvt(Bref, absorbed_radiation_PV_Wperm2, c1, eff_nom, time)
Mfl_kgpers = calc_Mfl_kgpers(DELT, Nseg, STORED, TIME0, Tin_C, specific_flows_kgpers[flow], time,
Cp_fluid_JperkgK, C_eff_Jperm2K, aperture_area_m2)
# calculate average fluid temperature and average absorber temperature at the beginning of the time-step
Tamb_C = Tamb_vector_C[time]
q_rad_Wperm2 = q_rad_vector[time]
Tout_C = calc_Tout_C(Cp_fluid_JperkgK, DT, Mfl_kgpers, Nseg, STORED, Tabs, Tamb_C, Tfl, Tin_C,
aperture_area_m2, c1_pvt, q_rad_Wperm2)
# calculate q_gain with the guess for DT[1]
q_gain_Wperm2 = calc_q_gain(Tfl, q_rad_Wperm2, DT, Tin_C, aperture_area_m2, c1_pvt, c2,
Mfl_kgpers, delts, Cp_fluid_JperkgK, C_eff_Jperm2K, Tamb_C)
Aseg_m2 = aperture_area_m2 / Nseg # aperture area per segment
# multi-segment calculation to avoid temperature jump at times of flow rate changes
Tout_Seg_C = do_multi_segment_calculation(Aseg_m2, C_eff_Jperm2K, Cp_fluid_JperkgK, DT, Mfl_kgpers, Mo_seg,
Nseg, STORED, Tabs, TabsA, Tamb_C, Tfl, TflA, TflB, Tin_C, Tout_C,
c1_pvt, c2, delts, q_gain_Seg, q_gain_Wperm2, q_rad_Wperm2)
# resulting energy output
q_out_kW = Mfl_kgpers * Cp_fluid_JperkgK * (Tout_Seg_C - Tin_C) / 1000 # [kW]
Tabs[2] = 0
# storage of the mean temperature
for Iseg in range(1, Nseg + 1):
STORED[200 + Iseg] = TflB[Iseg]
STORED[400 + Iseg] = TabsB[Iseg]
Tabs[2] = Tabs[2] + TabsB[Iseg] / Nseg
# outputs
temperature_out[flow][time] = Tout_Seg_C
temperature_in[flow][time] = Tin_C
supply_out_kW[flow][time] = q_out_kW
temperature_mean[flow][time] = (Tin_C + Tout_Seg_C) / 2  # mean fluid temperature at the present time step
q_gain_Wperm2 = 0
TavgB = 0
TavgA = 0
for Iseg in range(1, Nseg + 1):
q_gain_Wperm2 = q_gain_Wperm2 + q_gain_Seg[Iseg] * Aseg_m2  # total heat gain of the module [W]
TavgA = TavgA + TflA[Iseg] / Nseg
TavgB = TavgB + TflB[Iseg] / Nseg
# # OUT[9] = qgain/Area_a # in W/m2
# q_mtherm_Wperm2 = (TavgB - TavgA) * C_eff_Jperm2K * aperture_area_m2 / delts
# q_balance_error = q_gain_Wperm2 - q_mtherm_Wperm2 - q_out_kW
# OUT[11] = q_mtherm
# OUT[12] = q_balance_error
if flow < 4:
auxiliary_electricity_kW[flow] = vectorize_calc_Eaux_SC(specific_flows_kgpers[flow],
specific_pressure_losses_Pa[flow], pipe_lengths,
aperture_area_m2) # in kW
if flow == 3:
q1 = supply_out_kW[0]
q2 = supply_out_kW[1]
q3 = supply_out_kW[2]
q4 = supply_out_kW[3]
E1 = auxiliary_electricity_kW[0]
E2 = auxiliary_electricity_kW[1]
E3 = auxiliary_electricity_kW[2]
E4 = auxiliary_electricity_kW[3]
specific_flows_kgpers[4], specific_pressure_losses_Pa[4] = calc_optimal_mass_flow(q1, q2, q3, q4, E1, E2,
E3, E4, 0, mB0_r,
mB_max_r, mB_min_r, 0,
dP2, dP3, dP4,
aperture_area_m2)
if flow == 4:
auxiliary_electricity_kW[flow] = vectorize_calc_Eaux_SC(specific_flows_kgpers[flow],
specific_pressure_losses_Pa[flow], pipe_lengths,
aperture_area_m2) # in kW
dp5 = specific_pressure_losses_Pa[flow]
q5 = supply_out_kW[flow]
m5 = specific_flows_kgpers[flow]
# set points to zero when load is negative
specific_flows_kgpers[5], specific_pressure_losses_Pa[5] = calc_optimal_mass_flow_2(m5, q5, dp5)
if flow == 5: # optimal mass flow
supply_losses_kW[flow] = np.vectorize(calc_qloss_network)(specific_flows_kgpers[flow],
pipe_lengths['l_ext_mperm2'],
aperture_area_m2, temperature_mean[flow],
Tamb_vector_C, msc_max_kgpers)
supply_out_pre = supply_out_kW[flow].copy() + supply_losses_kW[flow].copy()
auxiliary_electricity_kW[flow] = vectorize_calc_Eaux_SC(specific_flows_kgpers[flow],
specific_pressure_losses_Pa[flow], pipe_lengths,
aperture_area_m2) # in kW
supply_out_total_kW = supply_out_kW + 0.5 * auxiliary_electricity_kW[flow] - supply_losses_kW[flow]
mcp_kWperK = specific_flows_kgpers[flow] * (Cp_fluid_JperkgK / 1000) # mcp in kW/c
turn_off_the_water_circuit_if_total_energy_supply_is_zero(T_module_C, Tcell_PV_C, auxiliary_electricity_kW[flow],
mcp_kWperK, supply_out_total_kW[5], temperature_in[5],
temperature_out[5])
el_output_PV_kW = np.vectorize(calc_PV_power)(absorbed_radiation_PV_Wperm2, T_module_C, eff_nom,
module_area_per_group_m2,
Bref, misc_losses)
# write results into a list
result = [supply_losses_kW[5], supply_out_total_kW[5], auxiliary_electricity_kW[5], temperature_out[5],
temperature_in[5], mcp_kWperK,
el_output_PV_kW]
return result
@jit(nopython=True)
def calc_cl_pvt(Bref, absorbed_radiation_PV_Wperm2, c1, eff_nom, time):
c1_pvt = max(0, c1 - eff_nom * Bref * absorbed_radiation_PV_Wperm2[time]) # _[J. Allan et al., 2015] eq.(18)
return c1_pvt
@jit(nopython=True)
def turn_off_the_water_circuit_if_total_energy_supply_is_zero(T_module_C, Tcell_PV_C, auxiliary_electricity_kW,
mcp_kWperK, supply_out_total_kW, temperature_in,
temperature_out):
for x in range(HOURS_IN_YEAR):
# turn off the water circuit if total energy supply is zero
if supply_out_total_kW[x] <= 0:
supply_out_total_kW[x] = 0
mcp_kWperK[x] = 0
auxiliary_electricity_kW[x] = 0
temperature_out[x] = 0
temperature_in[x] = 0
# update pv cell temperature with temperatures of the water circuit
T_module_mean_C = (temperature_out[x] + temperature_in[x]) / 2
T_module_C[x] = T_module_mean_C if T_module_mean_C > 0 else Tcell_PV_C[x]
@jit(nopython=True)
def do_multi_segment_calculation(Aseg_m2, C_eff_Jperm2K, Cp_fluid_JperkgK, DT, Mfl_kgpers, Mo_seg, Nseg, STORED, Tabs,
TabsA, Tamb_C, Tfl, TflA, TflB, Tin_C, Tout_C, c1_pvt, c2, delts, q_gain_Seg,
q_gain_Wperm2, q_rad_Wperm2):
Tout_Seg_C = 0.0 # this value will be overwritten after first iteration
for Iseg in range(1, Nseg + 1):
# get temperatures of the previous time-step
TflA[Iseg] = STORED[100 + Iseg]
TabsA[Iseg] = STORED[300 + Iseg]
if Iseg > 1:
Tin_Seg = Tout_Seg_C
else:
Tin_Seg = Tin_C
if Mfl_kgpers > 0 and Mo_seg == 1: # same heat gain/ losses for all segments
Tout_Seg_C = ((Mfl_kgpers * Cp_fluid_JperkgK * (Tin_Seg + 273.15)) / Aseg_m2 - (
C_eff_Jperm2K * (Tin_Seg + 273.15)) / (2 * delts) + q_gain_Wperm2 +
(C_eff_Jperm2K * (TflA[Iseg] + 273.15) / delts)) / (
Mfl_kgpers * Cp_fluid_JperkgK / Aseg_m2 + C_eff_Jperm2K / (2 * delts))
Tout_Seg_C = Tout_Seg_C - 273.15 # in [C]
TflB[Iseg] = (Tin_Seg + Tout_Seg_C) / 2
else: # heat losses based on each segment's inlet and outlet temperatures.
Tfl[1] = TflA[Iseg]
Tabs[1] = TabsA[Iseg]
q_gain_Wperm2 = calc_q_gain(Tfl, q_rad_Wperm2, DT, Tin_Seg, Aseg_m2, c1_pvt, c2,
Mfl_kgpers, delts, Cp_fluid_JperkgK, C_eff_Jperm2K, Tamb_C)
Tout_Seg_C = Tout_C
if Mfl_kgpers > 0:
TflB[Iseg] = (Tin_Seg + Tout_Seg_C) / 2
Tout_Seg_C = TflA[Iseg] + (q_gain_Wperm2 * delts) / C_eff_Jperm2K
else:
TflB[Iseg] = Tout_Seg_C
# the following lines do not perform meaningful operation, the iterations on DT are performed in calc_q_gain
# these lines are kept here as a reference to the original model in FORTRAN
# q_fluid_Wperm2 = (Tout_Seg_C - Tin_Seg) * Mfl_kgpers * Cp_fluid_JperkgK / Aseg_m2
# q_mtherm_Wperm2 = (TflB[Iseg] - TflA[Iseg]) * C_eff_Jperm2K / delts
# q_balance_error = q_gain_Wperm2 - q_fluid_Wperm2 - q_mtherm_Wperm2
# # assert abs(q_balance_error) > 1, "q_balance_error in photovoltaic-thermal calculation"
q_gain_Seg[Iseg] = q_gain_Wperm2 # in W/m2
return Tout_Seg_C
@jit(nopython=True)
def calc_Tout_C(Cp_fluid_JperkgK, DT, Mfl_kgpers, Nseg, STORED, Tabs, Tamb_C, Tfl, Tin_C, aperture_area_m2, c1_pvt,
q_rad_Wperm2):
Tfl[1] = 0 # mean fluid temperature
Tabs[1] = 0 # mean absorber temperature
for Iseg in range(1, Nseg + 1):
Tfl[1] = Tfl[1] + STORED[100 + Iseg] / Nseg # mean fluid temperature
Tabs[1] = Tabs[1] + STORED[300 + Iseg] / Nseg # mean absorber temperature
# first guess for Delta T
if Mfl_kgpers > 0:
Tout_C = Tin_C + (q_rad_Wperm2 - ((c1_pvt) + 0.5) * (Tin_C - Tamb_C)) / (
Mfl_kgpers * Cp_fluid_JperkgK / aperture_area_m2)
Tfl[2] = (Tin_C + Tout_C) / 2 # mean fluid temperature at present time-step
else:
# if c1_pvt < 0:
# print('c1_pvt: ', c1_pvt)
Tout_C = Tamb_C + q_rad_Wperm2 / (c1_pvt + 0.5)
Tfl[2] = Tout_C # fluid temperature same as output
# if Tout_C > T_max_C:
# print('Tout_C: ',Tout_C, 'c1_pvt: ', c1_pvt, 'q_rad', q_rad_Wperm2)
DT[1] = Tfl[2] - Tamb_C # difference between mean absorber temperature and the ambient temperature
return Tout_C
@jit(nopython=True)
def calc_Mfl_kgpers(DELT, Nseg, STORED, TIME0, Tin_C, specific_flows_kgpers, time, Cp_fluid_JperkgK, C_eff_Jperm2K,
aperture_area_m2):
Mfl_kgpers = specific_flows_kgpers[time]
if time < TIME0 + DELT / 2:
for Iseg in range(101, 501): # 400 points with the data
STORED[Iseg] = Tin_C
else:
for Iseg in range(1, Nseg):  # carry over the stored segment temperatures from the previous time step
STORED[100 + Iseg] = STORED[200 + Iseg]
STORED[300 + Iseg] = STORED[400 + Iseg]
# calculate stability criteria
if Mfl_kgpers > 0:
stability_criteria = Mfl_kgpers * Cp_fluid_JperkgK * Nseg * (DELT * 3600) / (
C_eff_Jperm2K * aperture_area_m2)
if stability_criteria <= 0.5:
print('ERROR: stability criteria', stability_criteria, 'is not reached.',
'aperture_area:', aperture_area_m2, 'mass flow:', Mfl_kgpers)
return Mfl_kgpers
# investment and maintenance costs
def calc_Cinv_PVT(PVT_peak_W, locator, technology=0):
"""
PVT_peak_W: peak installed PVT capacity [W]
result in CHF
technology = 0 represents the first technology when there are multiple technologies.
FIXME: handle multiple technologies when cost calculations are done
"""
if PVT_peak_W > 0.0:
PVT_cost_data = pd.read_excel(locator.get_database_conversion_systems(), sheet_name="PV")
technology_code = list(set(PVT_cost_data['code']))
PVT_cost_data = PVT_cost_data[PVT_cost_data['code'] == technology_code[technology]]
# if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least
# capacity for the corresponding technology from the database
if PVT_peak_W < PVT_cost_data['cap_min'].values[0]:
PVT_peak_W = PVT_cost_data['cap_min'].values[0]
PVT_cost_data = PVT_cost_data[
(PVT_cost_data['cap_min'] <= PVT_peak_W) & (PVT_cost_data['cap_max'] > PVT_peak_W)]
Inv_a = PVT_cost_data.iloc[0]['a']
Inv_b = PVT_cost_data.iloc[0]['b']
Inv_c = PVT_cost_data.iloc[0]['c']
Inv_d = PVT_cost_data.iloc[0]['d']
Inv_e = PVT_cost_data.iloc[0]['e']
Inv_IR = PVT_cost_data.iloc[0]['IR_%']
Inv_LT = PVT_cost_data.iloc[0]['LT_yr']
Inv_OM = PVT_cost_data.iloc[0]['O&M_%'] / 100
InvC = Inv_a + Inv_b * (PVT_peak_W) ** Inv_c + (Inv_d + Inv_e * PVT_peak_W) * log(PVT_peak_W)
Capex_a = calc_capex_annualized(InvC, Inv_IR, Inv_LT)
Opex_fixed = InvC * Inv_OM
Capex = InvC
else:
Capex_a = Opex_fixed = Capex = 0.0
return Capex_a, Opex_fixed, Capex
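# The annualization above is assumed to use the standard capital-recovery (annuity) factor; the
# helper below is a self-contained illustrative sketch of that formula, not the implementation
# of calc_capex_annualized used elsewhere in CEA.
def _example_annualized_capex(InvC, IR_percent, LT_yr):
    """Annualized capital cost: InvC * r * (1 + r)**n / ((1 + r)**n - 1), with r = IR_percent / 100."""
    r = IR_percent / 100.0
    if r == 0.0:
        return InvC / LT_yr
    annuity_factor = r * (1.0 + r) ** LT_yr / ((1.0 + r) ** LT_yr - 1.0)
    return InvC * annuity_factor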
def main(config):
assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
print('Running photovoltaic-thermal with scenario = %s' % config.scenario)
print(
'Running photovoltaic-thermal with annual-radiation-threshold-kWh/m2 = %s' % config.solar.annual_radiation_threshold)
print('Running photovoltaic-thermal with panel-on-roof = %s' % config.solar.panel_on_roof)
print('Running photovoltaic-thermal with panel-on-wall = %s' % config.solar.panel_on_wall)
print('Running photovoltaic-thermal with solar-window-solstice = %s' % config.solar.solar_window_solstice)
print('Running photovoltaic-thermal with t-in-pvt = %s' % config.solar.t_in_pvt)
print('Running photovoltaic-thermal with type-pvpanel = %s' % config.solar.type_pvpanel)
if config.solar.custom_tilt_angle:
print('Running photovoltaic-thermal with custom-tilt-angle = %s and panel-tilt-angle = %s' %
(config.solar.custom_tilt_angle, config.solar.panel_tilt_angle))
else:
print('Running photovoltaic-thermal with custom-tilt-angle = %s' % config.solar.custom_tilt_angle)
if config.solar.custom_roof_coverage:
print('Running photovoltaic-thermal with custom-roof-coverage = %s and max-roof-coverage = %s' %
(config.solar.custom_roof_coverage, config.solar.max_roof_coverage))
else:
print('Running photovoltaic-thermal with custom-roof-coverage = %s' % config.solar.custom_roof_coverage)
building_names = config.solar.buildings
zone_geometry_df = gdf.from_file(locator.get_zone_geometry())
latitude, longitude = get_lat_lon_projected_shapefile(zone_geometry_df)
# weather data
weather_data = epwreader.epw_reader(locator.get_weather_file())
date_local = solar_equations.calc_datetime_local_from_weather_file(weather_data, latitude, longitude)
print('reading weather data done.')
n = len(building_names)
cea.utilities.parallel.vectorize(calc_PVT, config.get_number_of_processes())(repeat(locator, n),
repeat(config, n),
repeat(latitude, n),
repeat(longitude, n),
repeat(weather_data, n),
repeat(date_local, n),
building_names)
# aggregate results from all buildings
aggregated_annual_results = {}
for i, building in enumerate(building_names):
hourly_results_per_building = pd.read_csv(locator.PVT_results(building))
if i == 0:
aggregated_hourly_results_df = hourly_results_per_building
temperature_sup = []
temperature_re = []
temperature_sup.append(hourly_results_per_building['T_PVT_sup_C'])
temperature_re.append(hourly_results_per_building['T_PVT_re_C'])
else:
aggregated_hourly_results_df = aggregated_hourly_results_df + hourly_results_per_building
temperature_sup.append(hourly_results_per_building['T_PVT_sup_C'])
temperature_re.append(hourly_results_per_building['T_PVT_re_C'])
annual_energy_production = hourly_results_per_building.filter(like='_kWh').sum()
panel_area_per_building = hourly_results_per_building.filter(like='_m2').iloc[0]
building_annual_results = annual_energy_production.append(panel_area_per_building)
aggregated_annual_results[building] = building_annual_results
# save hourly results
aggregated_hourly_results_df['T_PVT_sup_C'] = pd.DataFrame(temperature_sup).mean(axis=0)
aggregated_hourly_results_df['T_PVT_re_C'] = pd.DataFrame(temperature_re).mean(axis=0)
aggregated_hourly_results_df = aggregated_hourly_results_df[aggregated_hourly_results_df.columns.drop(
aggregated_hourly_results_df.filter(like='Tout', axis=1).columns)] # drop columns with Tout
aggregated_hourly_results_df = aggregated_hourly_results_df.set_index('Date')
aggregated_hourly_results_df.to_csv(locator.PVT_totals(), index=True, float_format='%.2f', na_rep='nan')
# save annual results
aggregated_annual_results_df = pd.DataFrame(aggregated_annual_results).T
aggregated_annual_results_df.to_csv(locator.PVT_total_buildings(), index=True, index_label="Name", float_format='%.2f', na_rep='nan')
if __name__ == '__main__':
main(cea.config.Configuration())
| mit |
cactusbin/nyt | matplotlib/doc/users/plotting/examples/simple_annotate01.py | 5 | 3309 |
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x1, y1 = 0.3, 0.3
x2, y2 = 0.7, 0.7
fig = plt.figure(1)
fig.clf()
from mpl_toolkits.axes_grid.axes_grid import Grid
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
from matplotlib.font_manager import FontProperties
def add_at(ax, t, loc=2):
fp = dict(size=10)
_at = AnchoredText(t, loc=loc, prop=fp)
ax.add_artist(_at)
return _at
grid = Grid(fig, 111, (4, 4), label_mode="1", share_all=True)
grid[0].set_autoscale_on(False)
ax = grid[0]
ax.plot([x1, x2], [y1, y2], "o")
ax.annotate("",
xy=(x1, y1), xycoords='data',
xytext=(x2, y2), textcoords='data',
arrowprops=dict(arrowstyle="->"))
add_at(ax, "A $->$ B", loc=2)
ax = grid[1]
ax.plot([x1, x2], [y1, y2], "o")
ax.annotate("",
xy=(x1, y1), xycoords='data',
xytext=(x2, y2), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=0.3"))
add_at(ax, "connectionstyle=arc3", loc=2)
ax = grid[2]
ax.plot([x1, x2], [y1, y2], "o")
ax.annotate("",
xy=(x1, y1), xycoords='data',
xytext=(x2, y2), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=0.3",
shrinkB=5,
)
)
add_at(ax, "shrinkB=5", loc=2)
ax = grid[3]
ax.plot([x1, x2], [y1, y2], "o")
el = mpatches.Ellipse((x1, y1), 0.3, 0.4, angle=30, alpha=0.5)
ax.add_artist(el)
ax.annotate("",
xy=(x1, y1), xycoords='data',
xytext=(x2, y2), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=0.2",
)
)
ax = grid[4]
ax.plot([x1, x2], [y1, y2], "o")
el = mpatches.Ellipse((x1, y1), 0.3, 0.4, angle=30, alpha=0.5)
ax.add_artist(el)
ax.annotate("",
xy=(x1, y1), xycoords='data',
xytext=(x2, y2), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=0.2",
patchB=el,
)
)
add_at(ax, "patchB", loc=2)
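# In the panel above, patchB=el makes the arrow end at the boundary of the ellipse patch
# instead of at the annotated data point itself.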
ax = grid[5]
ax.plot([x1], [y1], "o")
ax.annotate("Test",
xy=(x1, y1), xycoords='data',
xytext=(x2, y2), textcoords='data',
ha="center", va="center",
bbox=dict(boxstyle="round",
fc="w",
),
arrowprops=dict(arrowstyle="->",
#connectionstyle="arc3,rad=0.2",
)
)
add_at(ax, "annotate", loc=2)
ax = grid[6]
ax.plot([x1], [y1], "o")
ax.annotate("Test",
xy=(x1, y1), xycoords='data',
xytext=(x2, y2), textcoords='data',
ha="center", va="center",
bbox=dict(boxstyle="round",
fc="w",
),
arrowprops=dict(arrowstyle="->",
#connectionstyle="arc3,rad=0.2",
relpos=(0., 0.)
)
)
add_at(ax, "relpos=(0,0)", loc=2)
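# In the panel above, relpos=(0., 0.) anchors the arrow tail at the lower-left corner of the
# text bounding box (the default is the center of the box, (0.5, 0.5)).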
#ax.set_xlim(0, 1)
#ax.set_ylim(0, 1)
plt.draw()
| unlicense |
aminert/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
hsuantien/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
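# One possible way to complete the TASKs above (a hedged sketch, not the canonical tutorial
# solution; the analyzer and classifier choices are assumptions):
vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3))  # 1- to 3-character n-grams
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)          # fit the pipeline on the training set
y_predicted = clf.predict(docs_test)  # predict the outcome on the testing set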
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |