repo_name | path | copies | size | content | license
---|---|---|---|---|---
nupic-community/menorah | menorah/nupic_output.py | 1 | 6056 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import os
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
WINDOW = 300
class NuPICOutput(object):
__metaclass__ = ABCMeta
def __init__(self, names, showAnomalyScore=False):
self.names = names
self.showAnomalyScore = showAnomalyScore
@abstractmethod
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
pass
@abstractmethod
def close(self):
pass
class NuPICFileOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICFileOutput, self).__init__(*args, **kwargs)
self.outputFiles = []
self.outputWriters = []
self.lineCounts = []
headerRow = ['timestamp', 'value', 'prediction']
for name in self.names:
self.lineCounts.append(0)
outputFileName = os.path.join(name, "predictions.csv")
print "Preparing to output %s data to %s" % (name, outputFileName)
outputFile = open(outputFileName, "w")
self.outputFiles.append(outputFile)
outputWriter = csv.writer(outputFile)
self.outputWriters.append(outputWriter)
outputWriter.writerow(headerRow)
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
for index in range(len(self.names)):
timestamp = timestamps[index]
actual = actualValues[index]
prediction = predictedValues[index]
writer = self.outputWriters[index]
if timestamp is not None:
outputRow = [timestamp, actual, prediction]
writer.writerow(outputRow)
self.lineCounts[index] += 1
def close(self):
for index, name in enumerate(self.names):
self.outputFiles[index].close()
print "Done. Wrote %i data lines to %s." % (self.lineCounts[index], name)
class NuPICPlotOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICPlotOutput, self).__init__(*args, **kwargs)
# Turn matplotlib interactive mode on.
plt.ion()
self.dates = []
self.convertedDates = []
self.actualValues = []
self.predictedValues = []
self.actualLines = []
self.predictedLines = []
self.linesInitialized = False
self.graphs = []
plotCount = len(self.names)
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
for index in range(len(self.names)):
self.graphs.append(fig.add_subplot(gs[index, 0]))
plt.title(self.names[index])
plt.ylabel(*args[0])
plt.xlabel('Date')
plt.tight_layout()
def initializeLines(self, timestamps):
for index in range(len(self.names)):
print "initializing %s" % self.names[index]
# graph = self.graphs[index]
self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
self.convertedDates.append(deque(
[date2num(date) for date in self.dates[index]], maxlen=WINDOW
))
self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
actualPlot, = self.graphs[index].plot(
self.dates[index], self.actualValues[index]
)
self.actualLines.append(actualPlot)
predictedPlot, = self.graphs[index].plot(
self.dates[index], self.predictedValues[index]
)
self.predictedLines.append(predictedPlot)
self.linesInitialized = True
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamps)
for index in range(len(self.names)):
self.dates[index].append(timestamps[index])
self.convertedDates[index].append(date2num(timestamps[index]))
self.actualValues[index].append(actualValues[index])
self.predictedValues[index].append(predictedValues[index])
# Update data
self.actualLines[index].set_xdata(self.convertedDates[index])
self.actualLines[index].set_ydata(self.actualValues[index])
self.predictedLines[index].set_xdata(self.convertedDates[index])
self.predictedLines[index].set_ydata(self.predictedValues[index])
self.graphs[index].relim()
self.graphs[index].autoscale_view(True, True, True)
plt.draw()
plt.legend(('actual','predicted'), loc=3)
def close(self):
plt.ioff()
plt.show()
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
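# --- Usage sketch (added for illustration; not part of the original ------
# module). A minimal example of driving NuPICFileOutput: the directory
# name "gym" and the sample values are hypothetical, and the writer
# expects one existing directory per name in which it creates
# predictions.csv.
if __name__ == "__main__":
  import datetime
  if not os.path.isdir("gym"):
    os.makedirs("gym")
  demoOutput = NuPICFileOutput(["gym"])
  demoOutput.write([datetime.datetime.now()], [10.0], [9.5])
  demoOutput.close()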
| agpl-3.0
jdhp-sap/data-pipeline-standalone-scripts | utils/plot_score_correlation.py | 2 | 3747 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make statistics on score files (stored in JSON files).
"""
import common_functions as common
import argparse
import json
import numpy as np
from matplotlib import pyplot as plt
if __name__ == '__main__':
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on score files (JSON files).")
parser.add_argument("--logx", "-l", action="store_true", default=False,
help="Use a logaritmic scale on the X axis")
parser.add_argument("--logy", "-L", action="store_true", default=False,
help="Use a logaritmic scale on the Y axis")
parser.add_argument("--logz", action="store_true", default=False,
help="Use a logaritmic scale on the Y axis")
parser.add_argument("--output", "-o", default=None,
metavar="FILE",
help="The output file path")
parser.add_argument("--title", default=None,
metavar="STRING",
help="The title of the plot")
parser.add_argument("--index1", type=int, default=0, metavar="INT",
help="The index of the score to plot in case of multivalued scores")
parser.add_argument("--index2", type=int, default=1, metavar="INT",
help="The index of the score to plot in case of multivalued scores")
parser.add_argument("--hist2d", action="store_true",
help="Display an histogram")
parser.add_argument("--quiet", "-q", action="store_true",
help="Don't show the plot, just save it")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
logx = args.logx
logy = args.logy
logz = args.logz
title = args.title
hist2d = args.hist2d
quiet = args.quiet
json_file_path = args.fileargs[0]
score_index1 = args.index1
score_index2 = args.index2
# FETCH SCORE #############################################################
json_dict = common.parse_json_file(json_file_path)
score_array = common.extract_score_2d_array(json_dict, score_index1, score_index2)
label = json_dict["label"]
if args.output is None:
suffix = label + "_i" + str(score_index1) + "_i" + str(score_index2)
output_file_path = "score_correlation{}.pdf".format(suffix)
else:
output_file_path = args.output
# PLOT STATISTICS #########################################################
fig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(10, 6))
if hist2d:
common.plot_hist2d(ax1,
score_array[:,0],
score_array[:,1],
"Score {}".format(score_index1),
"Score {}".format(score_index2),
logx,
logy,
logz)
else:
common.plot_correlation(ax1,
score_array[:,0],
score_array[:,1],
"Score {}".format(score_index1),
"Score {}".format(score_index2),
logx,
logy)
if title is not None:
ax1.set_title(title, fontsize=20)
else:
ax1.set_title("{} scores correlation".format(label), fontsize=20)
# Save file and plot ########
plt.savefig(output_file_path, bbox_inches='tight')
if not quiet:
plt.show()
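# Example invocation (a sketch added for illustration; "scores.json" is a
# hypothetical input file produced upstream in the pipeline):
#
#   ./plot_score_correlation.py --hist2d --logz -o correlation.pdf scores.json
#
# The JSON file must provide the "label" key and the per-event scores read
# by common_functions.parse_json_file() and extract_score_2d_array().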
| mit
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/datasets/samples_generator.py | 26 | 56554 |
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
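# Usage sketch (added for illustration; not part of the original module).
# The parameter values are arbitrary; the shapes follow from n_samples and
# n_features, and labels stay in range(n_classes) even after flip_y noise.
def _demo_make_classification():
    X, y = make_classification(n_samples=200, n_features=10,
                               n_informative=4, n_redundant=2,
                               n_classes=3, n_clusters_per_class=1,
                               random_state=0)
    assert X.shape == (200, 10) and y.shape == (200,)
    assert set(np.unique(y)) <= {0, 1, 2}
    return X, y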
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
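# Usage sketch (added for illustration; hypothetical parameter values).
# With the default return_indicator='dense', Y is a binary indicator
# matrix with one column per class.
def _demo_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=50, n_features=15,
                                          n_classes=4, n_labels=2,
                                          random_state=0)
    assert X.shape == (50, 15) and Y.shape == (50, 4)
    return X, Y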
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
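# Usage sketch (added for illustration). The target is +/-1 depending on
# whether the squared norm of the ten Gaussian features exceeds 9.34.
def _demo_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=1000, random_state=0)
    assert X.shape == (1000, 10)
    assert set(np.unique(y)) <= {-1.0, 1.0}
    return X, y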
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
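# Usage sketch (added for illustration; arbitrary parameter values).
# With coef=True the underlying linear model is returned as well; only
# n_informative of its entries are nonzero.
def _demo_make_regression():
    X, y, coef = make_regression(n_samples=150, n_features=20,
                                 n_informative=5, noise=1.0,
                                 coef=True, random_state=0)
    assert X.shape == (150, 20) and y.shape == (150,)
    assert np.sum(coef != 0) <= 5
    return X, y, coef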
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
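# Usage sketch (added for illustration) covering the two 2-D toy
# generators above; the noise levels are arbitrary.
def _demo_make_circles_and_moons():
    X_c, y_c = make_circles(n_samples=200, noise=0.05, factor=0.5,
                            random_state=0)
    X_m, y_m = make_moons(n_samples=200, noise=0.05, random_state=0)
    assert X_c.shape == X_m.shape == (200, 2)
    assert set(np.unique(y_c)) == set(np.unique(y_m)) == {0, 1}
    return (X_c, y_c), (X_m, y_m)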
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
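# Usage sketch (added for illustration). Only the first five columns of X
# enter the Friedman #1 target; the rest are pure noise features.
def _demo_make_friedman1():
    X, y = make_friedman1(n_samples=100, n_features=10, noise=0.5,
                          random_state=0)
    assert X.shape == (100, 10) and y.shape == (100,)
    return X, y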
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profiles is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
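# Usage sketch (added for illustration; arbitrary parameter values). The
# singular values of the generated matrix decay quickly beyond the
# requested effective rank.
def _demo_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=100, n_features=50,
                             effective_rank=5, tail_strength=0.1,
                             random_state=0)
    s = linalg.svd(X, compute_uv=False)
    assert s[0] > s[20]
    return X, s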
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
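# Usage sketch (added for illustration; arbitrary sizes). Each column of
# the code X holds exactly n_nonzero_coefs active atoms and Y = D X by
# construction.
def _demo_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=20, n_components=30,
                                       n_features=15, n_nonzero_coefs=4,
                                       random_state=0)
    assert Y.shape == (15, 20) and D.shape == (15, 30) and X.shape == (30, 20)
    assert np.all((X != 0).sum(axis=0) == 4)
    assert np.allclose(Y, np.dot(D, X))
    return Y, D, X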
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
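# Usage sketch (added for illustration). The result is symmetric with
# strictly positive eigenvalues, i.e. positive-definite.
def _demo_make_spd_matrix():
    X = make_spd_matrix(n_dim=5, random_state=0)
    assert np.allclose(X, X.T)
    assert np.all(linalg.eigvalsh(X) > 0)
    return X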
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
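# Usage sketch (added for illustration). The returned t parameterizes the
# position along the roll and is typically used to color low-dimensional
# embeddings produced by manifold learning methods.
def _demo_make_swiss_roll():
    X, t = make_swiss_roll(n_samples=300, noise=0.05, random_state=0)
    assert X.shape == (300, 3) and t.shape == (300,)
    return X, t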
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
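# Usage sketch (added for illustration). Because labels are assigned by
# quantile, the classes come out (nearly) balanced.
def _demo_make_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=300, n_features=2,
                                   n_classes=3, random_state=0)
    assert X.shape == (300, 2) and y.shape == (300,)
    assert np.bincount(y).tolist() == [100, 100, 100]
    return X, y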
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
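# Usage sketch (added for illustration; arbitrary sizes). Every row and
# every column of the data belongs to exactly one of the n_clusters
# biclusters.
def _demo_make_biclusters():
    data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
                                       noise=0.5, random_state=0)
    assert data.shape == (30, 20)
    assert rows.shape == (3, 30) and cols.shape == (3, 20)
    assert np.all(rows.sum(axis=0) == 1) and np.all(cols.sum(axis=0) == 1)
    return data, rows, cols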
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
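# Usage sketch (added for illustration; arbitrary sizes). With
# n_clusters=(4, 3) there are 4 * 3 = 12 checkerboard blocks, hence 12
# indicator rows in both `rows` and `cols`.
def _demo_make_checkerboard():
    data, rows, cols = make_checkerboard(shape=(30, 20), n_clusters=(4, 3),
                                         noise=0.5, random_state=0)
    assert data.shape == (30, 20)
    assert rows.shape == (12, 30) and cols.shape == (12, 20)
    return data, rows, cols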
| unlicense
keras-team/keras-autodoc | tests/dummy_package/dummy_module.py | 1 | 21401 |
class Dense:
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
# Example
```python
# as first layer in a sequential model:
model = Sequential()
model.add(Dense(32, input_shape=(16,)))
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(Dense(32))
```
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
# Input shape
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
# Output shape
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
def __init__(self, units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
...
def to_categorical(y, num_classes=None, dtype='float32'):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
dtype: The data type expected by the input, as a string
(`float32`, `float64`, `int32`...)
# Returns
A binary matrix representation of the input. The classes axis
is placed last.
# Example
```python
# Consider an array of 5 labels out of a set of 3 classes {0, 1, 2}:
> labels
array([0, 2, 1, 2, 0])
# `to_categorical` converts this into a matrix with as many
# columns as there are classes. The number of rows
# stays the same.
> to_categorical(labels)
array([[ 1., 0., 0.],
[ 0., 0., 1.],
[ 0., 1., 0.],
[ 0., 0., 1.],
[ 1., 0., 0.]], dtype=float32)
```
"""
class ImageDataGenerator:
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_whitening: Boolean. Apply ZCA whitening.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats
in the interval `[-1.0, +1.0)`.
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats
in the interval `[-1.0, +1.0)`.
brightness_range: Tuple or list of two floats. Range for picking
a brightness shift value from.
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(after applying all other transformations).
preprocessing_function: function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (NumPy tensor with rank 3),
and should output a NumPy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
interpolation_order: int, order to use for
the spline interpolation. Higher is slower.
dtype: Dtype to use for the generated arrays.
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
Example of using `flow_from_dataframe(dataframe, directory, x_col, y_col)`:
```python
train_df = pandas.read_csv("./train.csv")
valid_df = pandas.read_csv("./valid.csv")
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_dataframe(
dataframe=train_df,
directory='data/train',
x_col="filename",
y_col="class",
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_dataframe(
dataframe=valid_df,
directory='data/validation',
x_col="filename",
y_col="class",
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
"""
def __init__(self,
featurewise_center: bool = False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format='channels_last',
validation_split=0.0,
interpolation_order=1,
dtype='float32'):
...
def flow(self,
x,
y=None,
batch_size=32,
shuffle=True,
sample_weight=None,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None):
"""Takes data & label arrays, generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, in case
of RGB data, it should have value 3, and in case
of RGBA data, it should have value 4.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
...
def flow_from_directory(self,
directory,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: string, path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](
https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to
have 1, 3, or 4 channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
...
|
apache-2.0
|
feilchenfeldt/pypopgen
|
plot/plottools.py
|
1
|
2238
|
import io, re
import itertools
import matplotlib as mpl
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
mpl.rcParams['svg.fonttype'] = 'none'
#fix svg inkscape bug
def fixmiterlimit(svgdata, miterlimit = 10):
# miterlimit variable sets the desired miterlimit
mlfound = False
svgout = ""
for line in svgdata:
if not mlfound:
# searches the stroke-miterlimit within the current line and changes its value
mlstring = re.subn(r'stroke-miterlimit:([0-9]+)', "stroke-miterlimit:" + str(miterlimit), line)
#if mlstring[1]: # use number of changes made to the line to check whether anything was found
#mlfound = True
svgout += mlstring[0] + '\n'
else:
svgout += line + '\n'
return svgout
def svg_save(fn,**kwa):
imgdata = io.StringIO() # initiate StringIO to write figure data to
# the same you would use to save your figure to svg, but instead of filename use StringIO object
ax = plt.gca()
plt.savefig(imgdata,
format='svg', bbox_inches='tight',**kwa)
imgdata.seek(0) # rewind the data
svg_dta = imgdata.getvalue() # this is svg data
svgoutdata = fixmiterlimit(re.split(r'\n', svg_dta)) # pass as an array of lines
svgfh = open(fn, 'w')
svgfh.write(svgoutdata.encode('utf-8').strip())
svgfh.close()
return ax
def multiscatter(x_list, y_list, color_list, label_list, ax=None, **kwa):
if ax is None:
fig = plt.figure(figsize=(6,4))
ax = plt.gca()
data_l = []
legend_elements = []
    for xs, ys, c, lbl in zip(x_list, y_list, color_list, label_list):
d = pd.DataFrame({'d':ys,'c':c}, index=xs)
data_l.append(d)
legend_element = mpl.lines.Line2D([0], [0], marker='o', color='w', label=lbl,
markerfacecolor=c, markersize=7)
legend_elements.append(legend_element)
    # The dropna is crucial, otherwise colors will not match values
data = pd.concat(data_l).sort_index().dropna()
ax.scatter(data.index.values,data['d'].values, color=list(data['c'].values),**kwa)
l = plt.legend(handles=legend_elements)
return ax, l
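if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module (assumes an
    # interactive matplotlib backend): overlay two scatter series with their
    # own colours and a shared legend.
    xs = np.linspace(0, 10, 50)
    ax, leg = multiscatter([xs, xs + 0.3],
                           [np.sin(xs), np.cos(xs)],
                           ['steelblue', 'darkorange'],
                           ['sin', 'cos'], s=15)
    # svg_save('multiscatter_example.svg') would additionally write the figure
    # as SVG with the stroke-miterlimit workaround applied.
    plt.show()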
|
mit
|
jamessergeant/pylearn2
|
pylearn2/utils/image.py
|
39
|
18841
|
"""
Utility functions for working with images.
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
from theano.compat.six import string_types
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A matplotlib-based image viewer command,
wrapping `matplotlib.pyplot.imshow` but behaving more
sensibly.
Parameters
----------
figure : TODO
TODO: write parameters section using decorators to inherit
the matplotlib docstring
Notes
-----
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
* By default, it creates a new figure (unless a
      `figure` keyword argument is supplied).
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
All other arguments and keyword arguments are passed
    on to `imshow`.
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Parameters
----------
window_title : str
TODO: writeme with decorators to inherit the other imviews'
docstrings
Notes
-----
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
# do some shape checking because PIL just raises a tuple indexing error
# that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
# The below is a temporary workaround that prevents us from crashing
# 3rd party image viewers such as eog by writing out overly large
# images.
# In the long run we should determine if this is a bug in PIL when
# producing
# such images or a bug in eog and determine a proper fix.
# Since this is hopefully just a short term workaround the
# constants below are not included in the interface to the
# function, so that 3rd party code won't start passing them.
max_height = 4096
max_width = 4096
# Display separate warnings for each direction, since it's
# common to crop only one.
if image.shape[0] > max_height:
image = image[0:max_height, :, :]
warnings.warn("Cropping image to smaller height to avoid crashing "
"the viewer program.")
        if image.shape[1] > max_width:
image = image[:, 0:max_width, :]
warnings.warn("Cropping the image to a smaller width to avoid "
"crashing the viewer program.")
# This ends the workaround
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
# don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
# PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:, :, 0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name + ' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name + ' ; rm ' + name,
shell=True)
def pil_from_ndarray(ndarray):
"""
Converts an ndarray to a PIL image.
Parameters
----------
ndarray : ndarray
An ndarray containing an image.
Returns
-------
pil : PIL Image
A PIL Image containing the image.
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
Converts a PIL Image to an ndarray.
Parameters
----------
pil : PIL Image
An image represented as a PIL Image object
dtype : str
The dtype of ndarray to create
Returns
-------
ndarray : ndarray
The image as an ndarray.
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
    Scales image down to fit inside shape; preserves the proportions of the image.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
    rstart = (shape[0] - image.shape[0]) // 2
    cstart = (shape[1] - image.shape[1]) // 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
Scales image down to shape. Preserves proportions of image, introduces
black letterboxing if necessary.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
def load(filepath, rescale_image=True, dtype='float64'):
"""
Load an image from a file.
Parameters
----------
filepath : str
Path to the image file to load
rescale_image : bool
Default value: True
If True, returned images have pixel values in [0, 1]. Otherwise,
values are in [0, 255].
dtype: str
The dtype to use for the returned value
Returns
-------
img : numpy ndarray
An array containing the image that was in the file.
"""
assert isinstance(filepath, string_types)
if not rescale_image and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
msg = ("Tried to load an image, got an array with %d"
" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format.")
if numpy_rval.ndim not in [2, 3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError(msg % numpy_rval.ndim)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
Saves an image to a file.
Parameters
----------
filepath : str
The path to write the file to.
ndarray : ndarray
An array containing the image to be saved.
"""
pil_from_ndarray(ndarray).save(filepath)
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
ndar : WRITEME
eps : WRITEME
Returns
-------
WRITEME
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
    X : numpy.ndarray
2-d ndarray or 4 tuple of 2-d ndarrays or None for channels,
in which every row is a flattened image.
    img_shape : 2-tuple of ints
The first component is the height of each image,
the second component is the width.
tile_shape : 2-tuple of ints
The number of images to tile in (row, columns) form.
scale_rows_to_unit_interval : bool
        Whether or not the values need to be scaled to [0, 1] before being plotted.
output_pixel_vals : bool
Whether or not the output should be pixel values (int8) or floats.
Returns
-------
y : 2d-ndarray
The return value has the same dtype as X, and is suitable for
viewing as an image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
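    # Additional sketch, not part of the original demo: tile a few flattened
    # random 50x50 grayscale images into a single mosaic with
    # tile_raster_images and display it through the same viewer as above.
    rng = np.random.RandomState(0)
    flat = rng.rand(6, 50 * 50)
    mosaic = tile_raster_images(flat, img_shape=(50, 50), tile_shape=(2, 3),
                                tile_spacing=(2, 2))
    show(mosaic)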
|
bsd-3-clause
|
untom/scikit-learn
|
examples/ensemble/plot_gradient_boosting_regularization.py
|
355
|
2843
|
"""
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
|
bsd-3-clause
|
AlexisEidelman/Til
|
til/utilisations/Compar_Destinie_Patrimoine/run.py
|
2
|
4828
|
# -*- coding:utf-8 -*-
'''
This program compares the Destinie database with the database generated directly from the Patrimoine survey.
The tables produced by Destinie and Patrimoine must already have been created:
    - data//data//Destinie//Patrimoine.py and data//data//Destinie//Destinie.py
For Patrimoine there is no point in duplicating records just for the comparison, so we start from the Patrimoine_0 version
'''
import pdb
from pgm.CONFIG import path_model
from pandas import read_hdf, HDFStore
import tables
import numpy as np
from scipy.stats import mstats
# returns a stats file that allows two tables to be compared
# first we collect the stats for each table,
# then we take the difference.
list_var_num = ['agem','sali','rsti','choi']
list_var_qual = ['civilstate','workstate','findet','quimen','quifoy','sexe']
pat = read_hdf(path_model + 'Patrimoine_400.h5', 'entities//person')
dest = read_hdf(path_model + 'Destinie.h5', 'entities//person')
table = pat
table1 = 'Patrimoine_300'
table2 = 'Destinie'
def quantile10(x): return x.quantile(0.1)
def quantile25(x): return x.quantile(0.25)
def quantile50(x): return x.quantile(0.5)
def quantile75(x): return x.quantile(0.75)
stat_on_numeric = [np.mean, np.std, np.min, np.max, quantile10, quantile25, quantile50, quantile75] #I don't know how to add some lambda function
class Comparaison_bases(object):
'''
    Organizes the comparison of the two databases.
    The two databases play symmetric roles; we always report the difference of the second minus the first.
'''
def __init__(self, table1='', table2=''):
self.name_tables = [table1, table2]
self.tables = None
self.stat_on_numeric = [np.mean, np.std, np.min, np.max, quantile10, quantile25, quantile50, quantile75]
def load(self):
self.tables = []
for name_table in self.name_tables:
table = read_hdf(path_model + name_table + '.h5', 'entities//person')
table['Total'] = 'Total'
self.tables += [table]
def get_stat_num(self, var_num, sous_cat=None):
        ''' returns the stats on numeric (or pseudo-numeric) variables
        - the variables are those of list_var_num
        - the stats are those of stat_on_numeric (which could be made an attribute of the class)
        - the result is a list of two series, one per table (a list so that an order is kept for the difference)
        - the dictionary of each table has one entry per variable, whose value is the result of all the tests
        '''
if sous_cat is None:
sous_cat = ['Total']
tab0, tab1 = self.tables
return [tab0[[var_num] + sous_cat].groupby(sous_cat).agg(stat_on_numeric),
tab1[[var_num] + sous_cat].groupby(sous_cat).agg(stat_on_numeric)]
def get_stat_qual(self, var_qual, sous_cat=None):
        ''' returns the stats on qualitative variables, namely their frequencies
        - the variables are those of list_var_qual
        - the result is a dictionary with one key per variable, whose value is a list
        of two tables holding the frequencies of the variable.
        '''
if sous_cat is None:
sous_cat = ['Total']
tab0, tab1 = self.tables
gp0 = tab0[[var_qual] + sous_cat].groupby(sous_cat)
gp1 = tab1[[var_qual] + sous_cat].groupby(sous_cat)
value0 = gp0[var_qual].value_counts()
value0 = 100*value0/value0.sum()
value1 = gp1[var_qual].value_counts()
value1 = 100*value1/value1.sum()
return value0, value1
def get_diff_num(self, var_num, sous_cat=None):
        ''' outputs, in a dictionary, the difference between the statistics of the two tables'''
print var_num
stat0, stat1 = self.get_stat_num(var_num, sous_cat)
return stat1 - stat0
def get_diff_qual(self, var_qual, sous_cat=None):
        ''' outputs, in a dictionary, the difference between the statistics of the two tables'''
stat = self.get_stat_qual(var_qual, sous_cat)
return stat[1] - stat[0]
if __name__ == '__main__':
cc = Comparaison_bases('Patrimoine_400','Destinie')
cc.load()
tab0, tab1 = cc.tables
for var_num in list_var_num:
if var_num not in ['sexe']:
print cc.get_diff_num(var_num, ['sexe'])
for var_qual in list_var_qual:
if var_qual not in ['sexe']:
print var_qual
print cc.get_diff_qual(var_qual, ['sexe'])
self = cc
cc.get_stat_qual('findet', ['sexe'])
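    # hedged sketch, not in the original script: raw numeric stats for wages
    # ('sali') broken down by sex, one table per base, before differencing
    stat_pat, stat_dest = cc.get_stat_num('sali', ['sexe'])
    print stat_pat
    print stat_dest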
pdb.set_trace()
|
gpl-3.0
|
heli522/scikit-learn
|
examples/ensemble/plot_bias_variance.py
|
357
|
7324
|
"""
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
|
bsd-3-clause
|
bigdataelephants/scikit-learn
|
benchmarks/bench_multilabel_metrics.py
|
11
|
7284
|
#!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
    density : array-like of floats (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
                    'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
|
bsd-3-clause
|
fzalkow/scikit-learn
|
doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py
|
256
|
2406
|
"""Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
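    # One possible way to fill in the TASKs above -- a hedged sketch, not the
    # official solution shipped with scikit-learn:
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    # grid search over unigrams vs. unigrams + bigrams
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    # cross-validated scores for each parameter set explored by the grid search
    for params, mean_score, scores in grid_search.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params))
    # predictions on the held-out test set, used by the report below
    y_predicted = grid_search.predict(docs_test)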
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
|
bsd-3-clause
|
depet/scikit-learn
|
sklearn/cross_decomposition/pls_.py
|
2
|
28547
|
"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_arrays
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights))
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
## 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights))
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / np.dot(y_weights.T, y_weights)
## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights
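# Illustrative sketch (added; not part of the original module): on random data
# the NIPALS inner loop above should recover, up to a simultaneous sign flip,
# the same leading singular vectors of X'Y as a direct SVD, so both absolute
# inner products returned below should be close to 1.
def _nipals_vs_svd_demo(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(50, 4)
    Y = rng.randn(50, 3)
    u_nip, v_nip = _nipals_twoblocks_inner_loop(X, Y, norm_y_weights=True)
    u_svd, v_svd = _svd_cross_product(X, Y)
    return (abs(float(np.dot(u_nip.T, u_svd))),
            abs(float(np.dot(v_nip.T, v_svd))))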
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
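# Small usage sketch (added; hypothetical data): with scale=True every column of
# the returned X and Y should end up with zero mean and unit standard deviation
# (ddof=1). The inputs are copied because _center_scale_xy works in place.
def _center_scale_demo(seed=0):
    rng = np.random.RandomState(seed)
    X = 5.0 * rng.randn(20, 3) + 2.0
    Y = 3.0 * rng.randn(20, 2) - 1.0
    Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(X.copy(), Y.copy())
    return (np.allclose(Xc.mean(axis=0), 0.0) and
            np.allclose(Xc.std(axis=0, ddof=1), 1.0) and
            np.allclose(Yc.mean(axis=0), 0.0) and
            np.allclose(Yc.std(axis=0, ddof=1), 1.0))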
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm; the constructor parameters
allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterates over components.
(ii) The inner loop estimates the weight vectors. This can be done
with two algorithms: (a) the inner loop of the original NIPALS algorithm,
or (b) an SVD on the residual cross-covariance matrices.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q]
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
Attributes
----------
`x_weights_` : array, [p, n_components]
X block weights vectors.
`y_weights_` : array, [q, n_components]
Y block weights vectors.
`x_loadings_` : array, [p, n_components]
X block loadings vectors.
`y_loadings_` : array, [q, n_components]
Y block loadings vectors.
`x_scores_` : array, [n_samples, n_components]
X scores.
`y_scores_` : array, [n_samples, n_components]
Y scores.
`x_rotations_` : array, [p, n_components]
X block to latents rotations.
`y_rotations_` : array, [q, n_components]
Y block to latents rotations.
coefs: array, [p, q]
The coefficients of the linear model: Y = X coefs + Err
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
# copy since this will contain the residual (deflated) matrices
X, Y = check_arrays(X, Y, dtype=np.float, copy=self.copy,
sparse_format='dense')
if X.ndim != 2:
raise ValueError('X must be a 2D array')
if Y.ndim == 1:
Y = Y.reshape((Y.size, 1))
if Y.ndim != 2:
raise ValueError('Y must be a 1D or a 2D array')
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if n != Y.shape[0]:
raise ValueError(
'Incompatible shapes: X has %s samples, while Y '
'has %s' % (X.shape[0], Y.shape[0]))
if self.n_components < 1 or self.n_components > p:
raise ValueError('invalid number of components')
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if not self.deflation_mode in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
#1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights = _nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
#2) Deflation (in place)
# ----------------------
# A possible memory footprint reduction could be made here: to avoid
# allocating a data chunk for the rank-one approximation matrix that is
# then subtracted from Xk, one could perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.inv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.inv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coefs = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coefs = (1. / self.x_std_.reshape((p, 1)) * self.coefs *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
# Normalize
if copy:
Xc = (np.asarray(X) - self.x_mean_) / self.x_std_
if Y is not None:
Yc = (np.asarray(Y) - self.y_mean_) / self.y_std_
else:
Xc = np.asarray(X)  # bind Xc so the in-place (copy=False) branch works
Xc -= self.x_mean_
Xc /= self.x_std_
if Y is not None:
Yc = np.asarray(Y)  # bind Yc for the same reason
Yc -= self.y_mean_
Yc /= self.y_std_
# Apply rotation
x_scores = np.dot(Xc, self.x_rotations_)
if Y is not None:
y_scores = np.dot(Yc, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
# Normalize
if copy:
Xc = (np.asarray(X) - self.x_mean_)
else:
Xc = np.asarray(X)
Xc -= self.x_mean_
# no division by x_std_ here: self.coefs already folds in the 1 / x_std_
# rescaling (see fit), matching the copy=True branch above
Ypred = np.dot(Xc, self.coefs)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q]
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm (default 1e-06).
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
Attributes
----------
`x_weights_` : array, [p, n_components]
X block weights vectors.
`y_weights_` : array, [q, n_components]
Y block weights vectors.
`x_loadings_` : array, [p, n_components]
X block loadings vectors.
`y_loadings_` : array, [q, n_components]
Y block loadings vectors.
`x_scores_` : array, [n_samples, n_components]
X scores.
`y_scores_` : array, [n_samples, n_components]
Y scores.
`x_rotations_` : array, [p, n_components]
X block to latents rotations.
`y_rotations_` : array, [q, n_components]
Y block to latents rotations.
coefs: array, [p, q]
The coefficients of the linear model: Y = X coefs + Err
Notes
-----
For each component k, find weights u, v that optimize:
``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results as three PLS packages
available in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm" with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical, PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
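# Added sketch (not part of the original module): per the comments in _PLS.fit,
# predictions are an affine function of X, i.e. Y_hat = (X - x_mean_) coefs +
# y_mean_, with coefs = W* Q' rescaled by the training standard deviations.
# The data mirrors the Examples section above.
def _plsregression_coefs_demo():
    X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    pls2 = PLSRegression(n_components=2).fit(X, Y)
    manual = np.dot(X - pls2.x_mean_, pls2.coefs) + pls2.y_mean_
    return np.allclose(manual, pls2.predict(X))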
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q]
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
Attributes
----------
`x_weights_` : array, shape = [p, n_components]
X block weights vectors.
`y_weights_` : array, shape = [q, n_components]
Y block weights vectors.
`x_loadings_` : array, shape = [p, n_components]
X block loadings vectors.
`y_loadings_` : array, shape = [q, n_components]
Y block loadings vectors.
`x_scores_` : array, shape = [n_samples, n_components]
X scores.
`y_scores_` : array, shape = [n_samples, n_components]
Y scores.
`x_rotations_` : array, shape = [p, n_components]
X block to latents rotations.
`y_rotations_` : array, shape = [q, n_components]
Y block to latents rotations.
Notes
-----
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression, but slightly different from CCA. This is mostly used
for modeling.
This implementation provides the same results as the "plspm" package
available in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with those of the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm, since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical, PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform a svd on the crosscovariance matrix: X'Y
There are no iterative deflation here.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vector, where n_samples is the number of samples and
p is the number of predictors. X will be centered before any analysis.
Y : array-like of response, shape = [n_samples, q]
Training vector, where n_samples is the number of samples and
q is the number of response variables. Y will be centered before any
analysis.
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale X and Y.
Attributes
----------
`x_weights_` : array, [p, n_components]
X block weights vectors.
`y_weights_` : array, [q, n_components]
Y block weights vectors.
`x_scores_` : array, [n_samples, n_components]
X scores.
`y_scores_` : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contain the centered data
X, Y = check_arrays(X, Y, dtype=np.float, copy=self.copy,
sparse_format='dense')
n = X.shape[0]
p = X.shape[1]
if X.ndim != 2:
raise ValueError('X must be a 2D array')
if n != Y.shape[0]:
raise ValueError(
'Incompatible shapes: X has %s samples, while Y '
'has %s' % (X.shape[0], Y.shape[0]))
if self.n_components < 1 or self.n_components > p:
raise ValueError('invalid number of components')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another solver. Otherwise,
# let's use arpack to compute only the components of interest.
if self.n_components == C.shape[1]:
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
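# Added usage sketch for PLSSVD, which (unlike the classes above) has no
# Examples section in its docstring; the data mirrors those other examples.
def _plssvd_demo():
    X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    pls_svd = PLSSVD(n_components=2).fit(X, Y)
    x_scores, y_scores = pls_svd.transform(X, Y)
    return x_scores.shape, y_scores.shape  # expected: (4, 2) and (4, 2)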
|
bsd-3-clause
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/pandas/tests/frame/test_convert_to.py
|
6
|
7374
|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameConvertTo(TestData):
def test_to_dict(self):
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
recons_data = DataFrame(test_data).to_dict()
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert v2 == recons_data[k][k2]
recons_data = DataFrame(test_data).to_dict("l")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert v2 == recons_data[k][int(k2) - 1]
recons_data = DataFrame(test_data).to_dict("s")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert v2 == recons_data[k][k2]
recons_data = DataFrame(test_data).to_dict("sp")
expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
'data': [[1.0, '1'], [2.0, '2'], [np.nan, '3']]}
tm.assert_dict_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("r")
expected_records = [{'A': 1.0, 'B': '1'},
{'A': 2.0, 'B': '2'},
{'A': np.nan, 'B': '3'}]
assert isinstance(recons_data, list)
assert len(recons_data) == 3
for l, r in zip(recons_data, expected_records):
tm.assert_dict_equal(l, r)
# GH10844
recons_data = DataFrame(test_data).to_dict("i")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert v2 == recons_data[k2][k]
def test_to_dict_timestamp(self):
# GH11247
# split/records producing np.datetime64 rather than Timestamps
# on datetime64[ns] dtypes only
tsmp = Timestamp('20130101')
test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})
expected_records = [{'A': tsmp, 'B': tsmp},
{'A': tsmp, 'B': tsmp}]
expected_records_mixed = [{'A': tsmp, 'B': 1},
{'A': tsmp, 'B': 2}]
assert (test_data.to_dict(orient='records') ==
expected_records)
assert (test_data_mixed.to_dict(orient='records') ==
expected_records_mixed)
expected_series = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([tsmp, tsmp], name='B'),
}
expected_series_mixed = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([1, 2], name='B'),
}
tm.assert_dict_equal(test_data.to_dict(orient='series'),
expected_series)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='series'),
expected_series_mixed)
expected_split = {
'index': [0, 1],
'data': [[tsmp, tsmp],
[tsmp, tsmp]],
'columns': ['A', 'B']
}
expected_split_mixed = {
'index': [0, 1],
'data': [[tsmp, 1],
[tsmp, 2]],
'columns': ['A', 'B']
}
tm.assert_dict_equal(test_data.to_dict(orient='split'),
expected_split)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='split'),
expected_split_mixed)
def test_to_dict_invalid_orient(self):
df = DataFrame({'A': [0, 1]})
pytest.raises(ValueError, df.to_dict, orient='xinvalid')
def test_to_records_dt64(self):
df = DataFrame([["one", "two", "three"],
["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"))
assert df.to_records()['index'][0] == df.index[0]
rs = df.to_records(convert_datetime64=False)
assert rs['index'][0] == df.index.values[0]
def test_to_records_with_multindex(self):
# GH3189
index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
data = np.zeros((8, 4))
df = DataFrame(data, index=index)
r = df.to_records(index=True)['level_0']
assert 'bar' in r
assert 'one' not in r
def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser
import collections
collections.Mapping.register(email.message.Message)
headers = Parser().parsestr('From: <[email protected]>\n'
'To: <[email protected]>\n'
'Subject: Test message\n'
'\n'
'Body would go here\n')
frame = DataFrame.from_records([headers])
all(x in frame for x in ['Type', 'Subject', 'From'])
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
def test_to_records_index_name(self):
df = DataFrame(np.random.randn(3, 3))
df.index.name = 'X'
rs = df.to_records()
assert 'X' in rs.dtype.fields
df = DataFrame(np.random.randn(3, 3))
rs = df.to_records()
assert 'index' in rs.dtype.fields
df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
df.index.names = ['A', None]
rs = df.to_records()
assert 'level_0' in rs.dtype.fields
def test_to_records_with_unicode_index(self):
# GH13172
# unicode_literals conflict with to_records
result = DataFrame([{u'a': u'x', u'b': 'y'}]).set_index(u'a')\
.to_records()
expected = np.rec.array([('x', 'y')], dtype=[('a', 'O'), ('b', 'O')])
tm.assert_almost_equal(result, expected)
def test_to_records_with_unicode_column_names(self):
# xref issue: https://github.com/numpy/numpy/issues/2407
# Issue #11879. to_records used to raise an exception when used
# with column names containing non-ASCII characters in Python 2
result = DataFrame(data={u"accented_name_é": [1.0]}).to_records()
# Note that numpy allows for unicode field names but dtypes need
# to be specified using a dictionary instead of a list of tuples.
expected = np.rec.array(
[(0, 1.0)],
dtype={"names": ["index", u"accented_name_é"],
"formats": ['<i8', '<f8']}
)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('tz', ['UTC', 'GMT', 'US/Eastern'])
def test_to_records_datetimeindex_with_tz(tz):
# GH13937
dr = date_range('2016-01-01', periods=10,
freq='S', tz=tz)
df = DataFrame({'datetime': dr}, index=dr)
expected = df.to_records()
result = df.tz_convert("UTC").to_records()
# both converted to UTC, so they are equal
tm.assert_numpy_array_equal(result, expected)
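# Added note (not part of the original test module): the short orient strings
# used in test_to_dict above are prefix abbreviations accepted by
# DataFrame.to_dict in the pandas versions these tests target --
# 'l' -> 'list', 's' -> 'series', 'sp' -> 'split', 'r' -> 'records',
# 'i' -> 'index'. A small sanity-check sketch:
def _orient_abbreviations_demo():
    df = DataFrame({'A': [0, 1]})
    assert df.to_dict('l') == df.to_dict('list')
    assert df.to_dict('sp') == df.to_dict('split')
    assert df.to_dict('r') == df.to_dict('records')
    assert df.to_dict('i') == df.to_dict('index')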
|
agpl-3.0
|
braghiere/JULESv4.6_clump
|
examples/point_loobos/output/plot.py
|
2
|
11652
|
from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
import numpy as np
import datetime as dt # Python standard library datetime module
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
def ncdump(nc_fid, verb=True):
'''
ncdump outputs dimensions, variables and their attribute information.
The information is similar to that of NCAR's ncdump utility.
ncdump requires a valid instance of Dataset.
Parameters
----------
nc_fid : netCDF4.Dataset
A netCDF4 Dataset object
verb : Boolean
whether or not nc_attrs, nc_dims, and nc_vars are printed
Returns
-------
nc_attrs : list
A Python list of the NetCDF file global attributes
nc_dims : list
A Python list of the NetCDF file dimensions
nc_vars : list
A Python list of the NetCDF file variables
'''
def print_ncattr(key):
"""
Prints the NetCDF file attributes for a given key
Parameters
----------
key : unicode
a valid netCDF4.Dataset.variables key
"""
try:
print "\t\ttype:", repr(nc_fid.variables[key].dtype)
for ncattr in nc_fid.variables[key].ncattrs():
print '\t\t%s:' % ncattr,\
repr(nc_fid.variables[key].getncattr(ncattr))
except KeyError:
print "\t\tWARNING: %s does not contain variable attributes" % key
# NetCDF global attributes
nc_attrs = nc_fid.ncattrs()
if verb:
print "NetCDF Global Attributes:"
for nc_attr in nc_attrs:
print '\t%s:' % nc_attr, repr(nc_fid.getncattr(nc_attr))
nc_dims = [dim for dim in nc_fid.dimensions] # list of nc dimensions
# Dimension shape information.
if verb:
print "NetCDF dimension information:"
for dim in nc_dims:
print "\tName:", dim
print "\t\tsize:", len(nc_fid.dimensions[dim])
print_ncattr(dim)
# Variable information.
nc_vars = [var for var in nc_fid.variables] # list of nc variables
if verb:
print "NetCDF variable information:"
for var in nc_vars:
if var not in nc_dims:
print '\tName:', var
print "\t\tdimensions:", nc_fid.variables[var].dimensions
print "\t\tsize:", nc_fid.variables[var].size
print_ncattr(var)
return nc_attrs, nc_dims, nc_vars
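# Added helper sketch (not in the original script): a minimal way to use the
# lists returned by ncdump, e.g. to get just the variable names of a file.
def list_nc_variables(path):
    nc = Dataset(path, mode='r')
    try:
        _, _, nc_vars = ncdump(nc, verb=False)
    finally:
        nc.close()
    return nc_vars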
my_example_nc_file = '/home/mn811042/jules4.x/4.6/trunk/examples/point_loobos/output/loobos.day.nc'
my_example_nc_file_2 = '/home/mn811042/jules4.x/4.6/trunk/examples/point_loobos/output/loobos.day_can_struc_a_05.nc'
nc_fid = Dataset(my_example_nc_file, mode='r')
nc_fid_2 = Dataset(my_example_nc_file_2, mode='r')
nc_attrs, nc_dims, nc_vars = ncdump(nc_fid)
# Extract data from NetCDF file
lats = nc_fid.variables['latitude'][:] # extract/copy the data
lons = nc_fid.variables['longitude'][:]
time = nc_fid.variables['time'][:]
gpp = nc_fid.variables['gpp'][:] # shape is time, lat, lon as shown above - 'PFT gross primary productivity'
gpp_gb = nc_fid.variables['gpp_gb'][:] # shape is time, lat, lon as shown above - 'Gridbox gross primary productivity'
gpp_2 = nc_fid_2.variables['gpp'][:] # shape is time, lat, lon as shown above - 'PFT gross primary productivity'
gpp_gb_2 = nc_fid_2.variables['gpp_gb'][:] # shape is time, lat, lon as shown above - 'Gridbox gross primary productivity'
print dt.timedelta(hours=np.float64(24))
time_idx = 100 # an arbitrary day in the record
# Python and the reanalysis are slightly off in time so this fixes that problem
#offset = dt.timedelta(hours=t3)
offset = dt.timedelta(hours=np.float64(24))
# List of all times in the file as datetime objects
dt_time = [dt.date(1997, 1, 1) + dt.timedelta(hours=np.float64(t)) - offset\
for t in time]
cur_time = dt_time[time_idx]
# Plot of global temperature on our random day
#>>>>>fig = plt.figure()
#>>>>>fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9)
# Setup the map. See http://matplotlib.org/basemap/users/mapsetup.html
# for other projections.
#>>>>>m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\
#>>>>> llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0)
#>>>>>m.drawcoastlines()
#>>>>>m.drawmapboundary()
# Make the plot continuous
#>>>>>air_cyclic, lons_cyclic = addcyclic(air[time_idx, :, :], lons)
# Shift the grid so lons go from -180 to 180 instead of 0 to 360.
#>>>>>air_cyclic, lons_cyclic = shiftgrid(180., air_cyclic, lons_cyclic, start=False)
# Create 2D lat/lon arrays for Basemap
#>>>>>lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
# Transforms lat/lon into plotting coordinates for projection
#>>>>>x, y = m(lon2d, lat2d)
# Plot of air temperature with 11 contour intervals
#>>>>>cs = m.contourf(x, y, air_cyclic, 11, cmap=plt.cm.Spectral_r)
#>>>>>cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5)
#>>>>>cbar.set_label("%s (%s)" % (nc_fid.variables['air'].var_desc,\
#>>>>> nc_fid.variables['air'].units))
#>>>>>plt.title("%s on %s" % (nc_fid.variables['air'].var_desc, cur_time))
# Writing NetCDF files
# The blocks below are adapted from the NCEP/NCAR reanalysis tutorial. The
# commented-out template writes the temperature profile for the entire year at
# Darwin, Australia; the active block further down writes the departure of gpp
# from its value at the grid point nearest Darwin.
darwin = {'name': 'Darwin, Australia', 'lat': -12.45, 'lon': 130.83}
# Find the nearest latitude and longitude for Darwin
lat_idx = np.abs(lats - darwin['lat']).argmin()
lon_idx = np.abs(lons - darwin['lon']).argmin()
# Simple example: temperature profile for the entire year at Darwin.
# Open a new NetCDF file to write the data to. For format, you can choose from
# 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'
#>>>>>w_nc_fid = Dataset('darwin_2012.nc', 'w', format='NETCDF4')
#>>>>>w_nc_fid.description = "NCEP/NCAR Reanalysis %s from its value at %s. %s" %\
#>>>>> (nc_fid.variables['air'].var_desc.lower(),\
#>>>>> darwin['name'], nc_fid.description)
# Using our previous dimension info, we can create the new time dimension
# Even though we know the size, we are going to set the size to unknown
#>>>>>w_nc_fid.createDimension('time', None)
#>>>>>w_nc_dim = w_nc_fid.createVariable('time', nc_fid.variables['time'].dtype,\
#>>>>> ('time',))
# You can do this step yourself but someone else did the work for us.
#>>>>>for ncattr in nc_fid.variables['time'].ncattrs():
#>>>>> w_nc_dim.setncattr(ncattr, nc_fid.variables['time'].getncattr(ncattr))
# Assign the dimension data to the new NetCDF file.
#>>>>>w_nc_fid.variables['time'][:] = time
#>>>>>w_nc_var = w_nc_fid.createVariable('air', 'f8', ('time'))
#>>>>>w_nc_var.setncatts({'long_name': u"mean Daily Air temperature",\
#>>>>> 'units': u"degK", 'level_desc': u'Surface',\
#>>>>> 'var_desc': u"Air temperature",\
#>>>>> 'statistic': u'Mean\nM'})
#>>>>>w_nc_fid.variables['air'][:] = air[time_idx, lat_idx, lon_idx]
#>>>>>w_nc_fid.close() # close the new file
# A plot of the GPP time series at the selected grid point (adapted from the
# Darwin air-temperature example)
fig = plt.figure()
plt.plot(dt_time, gpp[:, lat_idx, lon_idx], c='r',label='PFT GPP - a = 1.0')
#plt.plot(dt_time[time_idx], gpp[time_idx, lat_idx, lon_idx], c='b', marker='o',label='Gridbox GPP - a = 1.0')
plt.plot(dt_time, gpp_gb[:, lat_idx, lon_idx], c='g',label='Gridbox GPP - a = 1.0')
plt.plot(dt_time, gpp_2[:, lat_idx, lon_idx], c='b', marker='o',label='PFT GPP - a = 0.5')
plt.plot(dt_time, gpp_gb_2[:, lat_idx, lon_idx], c='k', marker='o',label='Gridbox GPP - a = 0.5')
plt.text(dt_time[time_idx], gpp[time_idx, lat_idx, lon_idx], cur_time,\
ha='right')
#fig.autofmt_xdate()
#plt.ylabel("%s (%s)" % (nc_fid.variables['gpp'].var_desc,\
# nc_fid.variables['gpp'].units))
plt.xlabel("Time in seconds since 1997-01-01 00:00:00")
plt.ylabel("GPP (kg m-2 s-1)")
#plt.title("%s from\n%s for %s" % (nc_fid.variables['gpp'].var_desc,\
# darwin['name'], cur_time.year))
plt.title("Loobos Flux site")
plt.legend()
plt.show()
plt.plot(gpp[:, lat_idx, lon_idx], gpp_2[:, lat_idx, lon_idx], marker='o',label='PFT GPP')
plt.plot(gpp_gb[:, lat_idx, lon_idx], gpp_gb_2[:, lat_idx, lon_idx],marker='o',label='Gridbox GPP')
plt.plot([0,1.8e-7],[0,1.8e-7],c='k',label='1:1')
plt.ylabel("GPP a = 0.5 (kg m-2 s-1)")
plt.xlabel("GPP a = 1.0 (kg m-2 s-1)")
plt.title("Loobos Flux site")
plt.legend()
plt.show()
# Complex example (adapted from the reanalysis tutorial): departure of gpp
# from its value at the grid point nearest Darwin. Note the metadata strings
# below still read attributes from an 'air' variable inherited from that example.
departure = gpp[:, :, :] - gpp[:, lat_idx, lon_idx].reshape((time.shape[0],1, 1))
# Open a new NetCDF file to write the data to. For format, you can choose from
# 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'
w_nc_fid = Dataset('air.departure.sig995.2012.nc', 'w', format='NETCDF4')
w_nc_fid.description = "The departure of the NCEP/NCAR Reanalysis " +\
"%s from its value at %s. %s" %\
(nc_fid.variables['air'].var_desc.lower(),\
darwin['name'], nc_fid.description)
# Using our previous dimension information, we can create the new dimensions
data = {}
for dim in nc_dims:
w_nc_fid.createDimension(dim, nc_fid.variables[dim].size)
data[dim] = w_nc_fid.createVariable(dim, nc_fid.variables[dim].dtype,\
(dim,))
# You can do this step yourself but someone else did the work for us.
for ncattr in nc_fid.variables[dim].ncattrs():
data[dim].setncattr(ncattr, nc_fid.variables[dim].getncattr(ncattr))
# Assign the dimension data to the new NetCDF file.
w_nc_fid.variables['time'][:] = time
w_nc_fid.variables['lat'][:] = lats
w_nc_fid.variables['lon'][:] = lons
# Ok, time to create our departure variable
w_nc_var = w_nc_fid.createVariable('air_dep', 'f8', ('time', 'lat', 'lon'))
w_nc_var.setncatts({'long_name': u"mean Daily Air temperature departure",\
'units': u"degK", 'level_desc': u'Surface',\
'var_desc': u"Air temperature departure",\
'statistic': u'Mean\nM'})
w_nc_fid.variables['air_dep'][:] = departure
w_nc_fid.close() # close the new file
# Rounded maximum absolute value of the departure used for contouring
max_dep = np.round(np.abs(departure[time_idx, :, :]).max()+5., decimals=-1)
# Generate a figure of the departure for a single day
fig = plt.figure()
fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9)
m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\
llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0)
m.drawcoastlines()
m.drawmapboundary()
dep_cyclic, lons_cyclic = addcyclic(departure[time_idx, :, :], lons)
dep_cyclic, lons_cyclic = shiftgrid(180., dep_cyclic, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
x, y = m(lon2d, lat2d)
levels = np.linspace(-max_dep, max_dep, 11)
cs = m.contourf(x, y, dep_cyclic, levels=levels, cmap=plt.cm.bwr)
x, y = m(darwin['lon'], darwin['lat'])
plt.plot(x, y, c='c', marker='o')
plt.text(x, y, 'Darwin,\nAustralia', color='r', weight='semibold')
cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5)
cbar.set_label("%s departure (%s)" % (nc_fid.variables['air'].var_desc,\
nc_fid.variables['air'].units))
plt.title("Departure of Global %s from\n%s for %s" %\
(nc_fid.variables['air'].var_desc, darwin['name'], cur_time))
plt.show()
# Close original NetCDF file.
nc_fid.close()
|
gpl-2.0
|
mrtommyb/GP_model_Kepler_data
|
code/transitemcee.py
|
3
|
44176
|
import sys
import numpy as np
#import matplotlib.pyplot as plt
import emcee
import tmodtom as tmod
import time as thetime
from scipy.stats import truncnorm
from claretquadpy import claretquad
from claret4ppy import claretlimb4p
from copy import deepcopy
from numpy import random
#from bilin_interp import ld_quad
class transitemcee(object):
def __init__(self,nplanets,cadence=1625.3,
ldfileloc='/Users/tom/svn_code/tom_code/',
codedir='/Users/tom/svn_code/tom_code/'):
sys.path.append(codedir)
self.nplanets = nplanets
nmax = 1500000 #from the fortran
self._ntt = np.zeros(nplanets)
self._tobs = np.empty([self.nplanets,nmax])
self._omc = np.empty([self.nplanets,nmax])
self.cadence = cadence / 86400.
self.allow_ecc_orbit = False
self.ldfileloc = ldfileloc
self.onlytransits = False
self.tregion = 500
def get_stellar(self,teff,logg,FeH,n_ldparams=4):
"""
read in stellar parameters
inputs
teff : float
The effective temperature of the star
logg : float
the surface gravity of the star in log cgs
FeH : float
the metallicity of the star in log solar
optional
n_ldparams : int
"""
self.Teff = teff
self.logg = logg
self.FeH = FeH
if n_ldparams == 2:
#if teff < 3500 and logg >= 3.5:
if False:
#this block should never run
ldfile = self.ldfileloc + 'claret-quad-phoenix.txt'
self.ld1,self.ld2 = ld_quad(ldfile,
self.Teff,self.logg)
self.ld3 = 0.0
self.ld4 = 0.0
#elif logg < 3.5 or teff >= 3500:
if True:
ldfile = self.ldfileloc + 'claret-limb-quad.txt'
self.ld1,self.ld2 = claretquad(ldfile,
self.Teff,self.logg,self.FeH)
self.ld3 = 0.0
self.ld4 = 0.0
elif n_ldparams == 4:
ldfile = self.ldfileloc + 'claret-limb.txt'
self.ld1,self.ld2,self.ld3,self.ld4 = claretlimb4p(ldfile,
self.Teff,self.logg,self.FeH)
def open_lightcurve(self,filename,timeoffset=0.0,
normalize=False):
t = np.genfromtxt(filename).T
time = t[0] - timeoffset
if normalize:
flux = t[1] / np.median(t[1])
err = t[2] / np.median(t[1])
else:
flux = t[1]
err = t[2]
self.time = time
self.flux = flux
self.err = err
self.npt = len(time)
self._itime = np.zeros(self.npt) + self.cadence
self._datatype = np.zeros(self.npt)
def already_open(self,t1,f1,e1,timeoffset=0.0,normalize=False):
time = t1 - timeoffset
if normalize:
flux = f1 / np.median(f1)
err = e1 / np.median(f1)
else:
flux = f1
err = e1
self.time = time
self.flux = flux
self.err = err
self.npt = len(time)
self._itime = np.zeros(self.npt) + self.cadence
self._datatype = np.zeros(self.npt)
def get_rho(self,rho_vals,prior=False,rho_start=0.0,
rho_stop = 30.):
"""
inputs
rho_vals : array_like
Two parameter array with value
rho, rho_unc
prior : bool, optional
should this rho be used as a prior?
"""
self.rho_0 = rho_vals[0]
self.rho_0_unc = rho_vals[1]
self.rho_0_start = rho_start
self.rho_0_stop = rho_stop
if prior:
self.rho_prior = True
else:
self.rho_prior = False
def get_zpt(self,zpt_0):
self.zpt_0 = zpt_0
if self.zpt_0 == 0.0:
self.zpt_0 = 1.E-10
def get_sol(self,*args,**kwargs):
"""
reads the guess transit fit solution
There are 6 args for every planet
T0, period, impact parameter, rp/rs, ecosw and esinw
optional keywords, these are kept fixed (for now)
dil : float, optional
dilution
veloffset : float, optional
velocity zeropoint
rvamp : float, optional
radial velocity amplitude from doppler beaming
occ : float, optional
occultation depth
ell : float, optional
amplitude of ellipsoidal variations
alb : float, optional
geometric albedo of the planet
"""
assert len(args) == self.nplanets * 6
if 'dil' in kwargs.keys():
dil = kwargs['dil']
print ' running with dil = %s' %(dil)
else:
dil = 0.0
if 'veloffset' in kwargs.keys():
veloffset = kwargs['veloffset']
else:
veloffset = 0.0
if 'rvamp' in kwargs.keys():
rvamp = kwargs['rvamp']
else:
rvamp = 0.0
if 'occ' in kwargs.keys():
occ = kwargs['occ']
else:
occ = 0.0
if 'ell' in kwargs.keys():
ell = kwargs['ell']
else:
ell = 0.0
if 'alb' in kwargs.keys():
alb = kwargs['alb']
else:
alb = 0.0
try:
if self.zpt_0 == 0.:
self.zpt_0 = 1.E-10
except AttributeError:
self.zpt_0 = 1.E-10
self.zpt_0_unc = 1.E-6
fit_sol = np.array([self.rho_0,self.zpt_0])
for i in xrange(self.nplanets):
T0_0 = args[i*6]
per_0 = args[i*6 +1]
b_0 = args[i*6 +2]
rprs_0 = args[i*6 +3]
ecosw_0 = args[i*6 +4]
esinw_0 = args[i*6 +5]
new_params = np.array([T0_0,per_0,
b_0,rprs_0,ecosw_0,esinw_0])
fit_sol = np.r_[fit_sol,new_params]
self.fit_sol = fit_sol
self.fit_sol_0 = deepcopy(self.fit_sol)
self.fixed_sol = np.array([self.ld1,self.ld2,
self.ld3,self.ld4,
dil,veloffset,rvamp,
occ,ell,alb])
def cut_non_transit(self,ntdur=10):
#make a mask for each planet candidate
self.onlytransits = True
tregion = np.zeros(self.nplanets)
maskdat = np.zeros([self.npt,self.nplanets],dtype=bool)
for i in xrange(self.nplanets):
T0 = self.fit_sol[i*6 + 2]
per = self.fit_sol[i*6 + 3]
rho = self.fit_sol[0]
ars = self.get_ar(rho,per)
tdur_dys = (1./ars) * per * (1./np.pi)
#this is buggy because T0 is not necessarily the time of the first transit,
#but the time of some transit, so fudge:
#step T0 back by whole periods until it is the first transit in the data
time0 = np.copy(T0)
while True:
if time0 - per < self.time[0]:
break
else:
time0 = time0 - per
ntransits = int((self.time[-1] - self.time[0]) / per) + 1
t_times = np.arange(ntransits)*per + T0
#make sure the first and last transit are not excluded even if
#partially in the data
t_times = np.r_[t_times,t_times[0] - per,t_times[-1] + per]
for j in t_times:
maskdat[:,i] = np.logical_or(maskdat[:,i],
np.logical_and(
self.time < j +tdur_dys*ntdur,
self.time > j - tdur_dys*ntdur) )
tregion[i] = ntdur*tdur_dys
#create a final mask that is the OR of the
#individual masks
finmask = np.zeros(self.npt)
for i in xrange(self.nplanets):
finmask = np.logical_or(finmask,maskdat[:,i])
self.time = self.time[finmask]
self.flux = self.flux[finmask]
self.err = self.err[finmask]
self._itime = self._itime[finmask]
self._datatype = self._datatype[finmask]
self.tregion = tregion
def get_ar(self,rho,period):
""" gets a/R* from period and mean stellar density"""
G = 6.67E-11
rho_SI = rho * 1000.
tpi = 3. * np.pi
period_s = period * 86400.
part1 = period_s**2 * G * rho_SI
ar = (part1 / tpi)**(1./3.)
return ar
# def calc_model(self,fitsol):
# sol = np.zeros([8 + 10*self.nplanets])
# rho = fitsol[0]
# zpt = fitsol[1]
# ld1,ld2,ld3,ld4 = self.fixed_sol[0:4]
# dil = self.fixed_sol[4]
# veloffset = self.fixed_sol[5]
# fixed_stuff = self.fixed_sol[6:10]
# sol[0:8] = np.array([rho,ld1,ld2,ld3,ld4,
# dil,veloffset,zpt])
# for i in xrange(self.nplanets):
# sol[8+(i*10):8+(i*10)+10] = np.r_[fitsol[2+i*6:8+i*6],fixed_stuff]
# tmodout = tmod.transitmodel(self.nplanets,sol,self.time,self._itime,
# self._ntt,self._tobs,self._omc,self._datatype)
# return tmodout - 1.
# def logchi2(self,fitsol):
# rho = fitsol[0]
# if rho < 0.001 or rho > 30.:
# return -np.inf
# rprs = fitsol[np.arange(self.nplanets)*6 + 5]
# if np.any(rprs < 0.) or np.any(rprs > 0.5):
# return -np.inf
# ecosw = fitsol[np.arange(self.nplanets)*6 + 6]
# if np.any(ecosw < -1.0) or np.any(ecosw > 1.0):
# return -np.inf
# esinw = fitsol[np.arange(self.nplanets)*6 + 7]
# if np.any(esinw < -1.0) or np.any(esinw > 1.0):
# return -np.inf
# b = fitsol[np.arange(self.nplanets)*6 + 4]
# if np.any(b < 0.) or np.any(b > 1.0 + rprs):
# return -np.inf
# model_lc = self.calc_model(fitsol)
# if self.rho_prior:
# chi2prior = (self.rho_0 - rho)**2 / self.rho_0_unc**2
# else:
# chi2prior = 0.0
# chi2val = np.sum((model_lc - self.flux)**2 / self.err**2)
# chi2tot = chi2val + chi2prior
# logp = -chi2tot / 2.
# return logp
# def do_emcee(self,nwalkers,threads=16,burnin=100,fullrun=1000):
# l_var = 8
# p0 = self.get_guess(nwalkers)
# sampler = emcee.EnsembleSampler(nwalkers, l_var, self.logchi2,
# threads=threads)
# time1 = thetime.time()
# pos, prob, state = sampler.run_mcmc(p0, burnin)
# sampler.reset()
# time2 = thetime.time()
# print 'burn-in took ' + str((time2 - time1)/60.) + ' min'
# time1 = thetime.time()
# sampler.run_mcmc(pos, fullrun)
# time2 = thetime.time()
# print 'MCMC run took ' + str((time2 - time1)/60.) + ' min'
# print
# print("Mean acceptance: "
# + str(np.mean(sampler.acceptance_fraction)))
# print
# try:
# print("Autocorrelation times sampled:", fullrun / sampler.acor)
# except RuntimeError:
# print("No Autocorrelation")
# return sampler, (time2 - time1)/60.
def get_guess(self,nwalkers):
"""
pick sensible starting ranges for the guess parameters
T0, period, impact parameter, rp/rs, ecosw and esinw
"""
rho_unc = 0.001
zpt_unc = 1.E-8
T0_unc = 0.0002
per_unc = 0.00005
b_unc = 0.001
rprs_unc = 0.0001
ecosw_unc = 0.001
esinw_unc = 0.001
p0 = np.zeros([nwalkers,2+self.nplanets*6])
rho = self.fit_sol[0]
zpt = self.fit_sol[1]
start,stop = (0.0001 - rho) / rho_unc, (30.0 - rho) / rho_unc
p0[...,0] = truncnorm.rvs(start,stop
,loc=rho,scale=rho_unc,size=nwalkers)
p0[...,1] = np.random.normal(loc=zpt,scale=zpt,size=nwalkers)
for i in xrange(self.nplanets):
T0,per,b,rprs,ecosw,esinw = self.fit_sol[i*6+2:i*6 + 8]
b = 0.0
ecosw = 0.0
esinw = 0.0
p0[...,i*6+2] = np.random.normal(T0,T0_unc,size=nwalkers)
p0[...,i*6+3] = np.random.normal(per,per_unc,size=nwalkers)
start,stop = (0.0 - b) / b_unc, (0.5 - b) / b_unc
p0[...,i*6+4] = truncnorm.rvs(start,stop
,loc=b,scale=b_unc,size=nwalkers)
start,stop = (0.0 - rprs) / rprs_unc, (0.5 - rprs) / rprs_unc
p0[...,i*6+5] = truncnorm.rvs(start,stop
,loc=rprs,scale=rprs_unc,size=nwalkers)
start,stop = (0.0 - ecosw) / ecosw_unc, (0.5 - ecosw) / ecosw_unc
p0[...,i*6+6] = truncnorm.rvs(start,stop
,loc=ecosw,scale=ecosw_unc,size=nwalkers)
start,stop = (0.0 - esinw) / esinw_unc, (0.5 - esinw) / esinw_unc
p0[...,i*6+7] = truncnorm.rvs(start,stop
,loc=esinw,scale=esinw_unc,size=nwalkers)
return p0
class transitemcee_paramprior(transitemcee):
def __init__(self,nplanets,cadence=1626.3,
ldfileloc='/Users/tom/svn_code/tom_code/'):
transitemcee.__init__(self,nplanets,cadence,ldfileloc)
def get_stellar(self,teff,teff_unc,logg,logg_unc,FeH,FeH_unc,
n_ldparams=2):
"""
read in stellar parameters
inputs
teff : float
The effective temperature of the star
logg : float
the surface gravity of the star in log cgs
FeH : float
the metallicity of the star in log solar
optional
n_ldparams : int
"""
self.Teff = teff
self.Teff_unc = teff_unc
self.logg = logg
self.logg_unc = logg_unc
self.FeH = FeH
self.FeH_unc = FeH_unc
self.n_ldparams = n_ldparams
def get_sol(self,*args,**kwargs):
"""
reads the guess transit fit solution
There are 6 args for every planet
T0, period, impact parameter, rp/rs, ecosw and esinw
optional keywords, these are kept fixed (for now)
dil : float, optional
dilution
veloffset : float, optional
velocity zeropoint
rvamp : float, optional
radial velocity amplitude from doppler beaming
occ : float, optional
occultation depth
ell : float, optional
amplitude of ellipsoidal variations
alb : float, optional
geometric albedo of the planet
"""
assert len(args) == self.nplanets * 6
if 'dil' in kwargs.keys():
dil = kwargs['dil']
print ' running with dil = %s' %(dil)
else:
dil = 0.0
if 'veloffset' in kwargs.keys():
veloffset = kwargs['veloffset']
else:
veloffset = 0.0
if 'rvamp' in kwargs.keys():
rvamp = kwargs['rvamp']
else:
rvamp = 0.0
if 'occ' in kwargs.keys():
occ = kwargs['occ']
else:
occ = 0.0
if 'ell' in kwargs.keys():
ell = kwargs['ell']
else:
ell = 0.0
if 'alb' in kwargs.keys():
alb = kwargs['alb']
else:
alb = 0.0
try:
if self.zpt_0 == 0.:
self.zpt_0 = 1.E-10
except AttributeError:
self.zpt_0 = 1.E-10
self.zpt_0_unc = 1.E-6
fit_sol = np.array([self.rho_0,self.zpt_0,self.Teff,self.logg,self.FeH])
for i in xrange(self.nplanets):
T0_0 = args[i*6]
per_0 = args[i*6 +1]
b_0 = args[i*6 +2]
rprs_0 = args[i*6 +3]
ecosw_0 = args[i*6 +4]
esinw_0 = args[i*6 +5]
new_params = np.array([T0_0,per_0,
b_0,rprs_0,ecosw_0,esinw_0])
fit_sol = np.r_[fit_sol,new_params]
self.fit_sol = fit_sol
self.fit_sol_0 = deepcopy(self.fit_sol)
self.fixed_sol = np.array([
dil,veloffset,rvamp,
occ,ell,alb])
def get_guess(self,nwalkers):
"""
pick sensible starting ranges for the guess parameters
T0, period, impact parameter, rp/rs, ecosw and esinw
"""
rho_unc = 0.001
zpt_unc = 1.E-8
teff_unc = 10
logg_unc = 0.01
feh_unc = 0.01
T0_unc = 0.0002
per_unc = 0.00005
b_unc = 0.001
rprs_unc = 0.0001
ecosw_unc = 0.001
esinw_unc = 0.001
p0 = np.zeros([nwalkers,5+self.nplanets*6])
rho = self.fit_sol[0]
zpt = self.fit_sol[1]
teff = self.fit_sol[2]
logg = self.fit_sol[3]
feh = self.fit_sol[4]
start,stop = (0.0001 - rho) / rho_unc, (30.0 - rho) / rho_unc
p0[...,0] = truncnorm.rvs(start,stop
,loc=rho,scale=rho_unc,size=nwalkers)
p0[...,1] = np.random.normal(loc=zpt,scale=zpt,size=nwalkers)
start,stop = (3500. - teff) / teff_unc, (50000. - teff) / teff_unc
p0[...,2] = truncnorm.rvs(start,stop
,loc=teff,scale=teff_unc,size=nwalkers)
start,stop = (0.0 - logg) / logg_unc, (5. - logg) / logg_unc
p0[...,3] = truncnorm.rvs(start,stop
,loc=logg,scale=logg_unc,size=nwalkers)
start,stop = (-5.0 - feh) / feh_unc, (1.0 - feh) / feh_unc
p0[...,4] = truncnorm.rvs(start,stop
,loc=feh,scale=feh_unc,size=nwalkers)
for i in xrange(self.nplanets):
T0,per,b,rprs,ecosw,esinw = self.fit_sol[i*6+5:i*6 + 11]
b = 0.0
ecosw = 0.0
esinw = 0.0
p0[...,i*6+5] = np.random.normal(T0,T0_unc,size=nwalkers)
p0[...,i*6+6] = np.random.normal(per,per_unc,size=nwalkers)
start,stop = (0.0 - b) / b_unc, (0.5 - b) / b_unc
p0[...,i*6+7] = truncnorm.rvs(start,stop
,loc=b,scale=b_unc,size=nwalkers)
start,stop = (0.0 - rprs) / rprs_unc, (0.5 - rprs) / rprs_unc
p0[...,i*6+8] = truncnorm.rvs(start,stop
,loc=rprs,scale=rprs_unc,size=nwalkers)
start,stop = (0.0 - ecosw) / ecosw_unc, (0.5 - ecosw) / ecosw_unc
p0[...,i*6+9] = truncnorm.rvs(start,stop
,loc=ecosw,scale=ecosw_unc,size=nwalkers)
start,stop = (0.0 - esinw) / esinw_unc, (0.5 - esinw) / esinw_unc
p0[...,i*6+10] = truncnorm.rvs(start,stop
,loc=esinw,scale=esinw_unc,size=nwalkers)
return p0
def cut_non_transit(self,ntdur=10):
#make a mask for each planet candidate
self.onlytransits = True
tregion = np.zeros(self.nplanets)
maskdat = np.zeros([self.npt,self.nplanets],dtype=bool)
for i in xrange(self.nplanets):
T0 = self.fit_sol[i*6 + 5]
per = self.fit_sol[i*6 + 6]
rho = self.fit_sol[0]
ars = self.get_ar(rho,per)
tdur_dys = (1./ars) * per * (1./np.pi)
#this is buggy because T0 is not necessarily the time of the first transit,
#but the time of some transit, so fudge:
#step T0 back by whole periods until it is the first transit in the data
time0 = np.copy(T0)
while True:
if time0 - per < self.time[0]:
break
else:
time0 = time0 - per
ntransits = int((self.time[-1] - self.time[0]) / per) + 1
t_times = np.arange(ntransits)*per + T0
#make sure the first and last transit are not excluded even if
#partially in the data
t_times = np.r_[t_times,t_times[0] - per,t_times[-1] + per]
for j in t_times:
maskdat[:,i] = np.logical_or(maskdat[:,i],
np.logical_and(
self.time < j +tdur_dys*ntdur,
self.time > j - tdur_dys*ntdur) )
tregion[i] = ntdur*tdur_dys
#create a final mask that is the OR of the
#individual masks
finmask = np.zeros(self.npt)
for i in xrange(self.nplanets):
finmask = np.logical_or(finmask,maskdat[:,i])
self.time = self.time[finmask]
self.flux = self.flux[finmask]
self.err = self.err[finmask]
self._itime = self._itime[finmask]
self._datatype = self._datatype[finmask]
self.tregion = tregion
class transitemcee_paramprior_occ(transitemcee_paramprior):
pass
class transitemcee_fitldp(transitemcee):
def __init__(self,nplanets,cadence=1626.3,
ldfileloc='/Users/tom/svn_code/tom_code/',
codedir='/Users/tom/svn_code/tom_code/'):
transitemcee.__init__(self,nplanets,cadence,ldfileloc,codedir)
def get_stellar(self,teff,logg,FeH,
n_ldparams=2,ldp_prior=True):
"""
read in stellar parameters
inputs
teff : float
The effective temperature of the star
logg : float
the surface gravity of the star in log cgs
FeH : float
the metallicity of the star in log solar
optional
n_ldparams : int
"""
self.Teff = teff
self.logg = logg
self.FeH = FeH
self.ld1_unc = 0.1
self.ld2_unc = 0.1
self.ld3_unc = 0.1
self.ld4_unc = 0.1
if teff < 3500:
teff = 3500
self.ld1_unc = 0.2
self.ld2_unc = 0.2
if logg < 0.0:
logg = 0.0
self.ld1_unc = 0.05
self.ld2_unc = 0.05
if logg > 5.0:
logg = 5.0
self.ld1_unc = 0.05
self.ld2_unc = 0.05
if FeH < -5.0:
FeH = -5.0
self.ld1_unc = 0.05
self.ld2_unc = 0.05
if FeH > 1.0:
FeH = 1.0
self.ld1_unc = 0.05
self.ld2_unc = 0.05
if n_ldparams == 2:
ldfile = self.ldfileloc + 'claret-limb-quad.txt'
self.ld1,self.ld2 = claretquad(ldfile,
teff,logg,FeH)
self.ld3 = 0.0
self.ld4 = 0.0
if teff < 3500:
self.ld1,self.ld2 = claretquad(ldfile,
3500.,logg,FeH)
elif n_ldparams == 4:
ldfile = self.ldfileloc + 'claret-limb.txt'
self.ld1,self.ld2,self.ld3,self.ld4 = claretlimb4p(
ldfile,
self.Teff,self.logg,self.FeH)
self.ldp_prior = ldp_prior
self.n_ldparams = n_ldparams
def get_sol(self,*args,**kwargs):
"""
reads the guess transit fit solution
There are 6 args for every planet
T0, period, impact parameter, rp/rs, ecosw and esinw
optional keywords, these are kept fixed (for now)
dil : float, optional
dilution
veloffset : float, optional
velocity zeropoint
rvamp : float, optional
radial velocity amplitude from doppler beaming
occ : float, optional
occultation depth
ell : float, optional
amplitude of ellipsoidal variations
alb : float, optional
geometric albedo of the planet
"""
assert len(args) == self.nplanets * 6
if 'dil' in kwargs.keys():
dil = kwargs['dil']
print ' running with dil = %s' %(dil)
else:
dil = 0.0
if 'veloffset' in kwargs.keys():
veloffset = kwargs['veloffset']
else:
veloffset = 0.0
if 'rvamp' in kwargs.keys():
rvamp = kwargs['rvamp']
else:
rvamp = 0.0
if 'occ' in kwargs.keys():
occ = kwargs['occ']
else:
occ = 0.0
if 'ell' in kwargs.keys():
ell = kwargs['ell']
else:
ell = 0.0
if 'alb' in kwargs.keys():
alb = kwargs['alb']
else:
alb = 0.0
try:
if self.zpt_0 == 0.:
self.zpt_0 = 1.E-10
except AttributeError:
self.zpt_0 = 1.E-10
self.zpt_0_unc = 1.E-6
if self.n_ldparams == 2:
fit_sol = np.array([self.rho_0,self.zpt_0,
self.ld1,self.ld2])
elif self.n_ldparams == 4:
fit_sol = np.array([self.rho_0,self.zpt_0,
self.ld1,self.ld2,self.ld3, self.ld4])
for i in xrange(self.nplanets):
T0_0 = args[i*6]
per_0 = args[i*6 +1]
b_0 = args[i*6 +2]
rprs_0 = args[i*6 +3]
ecosw_0 = args[i*6 +4]
esinw_0 = args[i*6 +5]
new_params = np.array([T0_0,per_0,
b_0,rprs_0,ecosw_0,esinw_0])
fit_sol = np.r_[fit_sol,new_params]
self.fit_sol = fit_sol
self.fit_sol_0 = deepcopy(self.fit_sol)
self.fixed_sol = np.array([
dil,veloffset,rvamp,
occ,ell,alb])
def get_guess(self,nwalkers):
"""
pick sensible starting ranges for the guess parameters
T0, period, impact parameter, rp/rs, ecosw and esinw
"""
rho_unc = 0.1
zpt_unc = 1.E-8
ld1_unc = 0.05
ld2_unc = 0.05
ld3_unc = 0.05
ld4_unc = 0.05
T0_unc = 0.0002
per_unc = 0.00005
b_unc = 0.001
rprs_unc = 0.0001
ecosw_unc = 0.001
esinw_unc = 0.001
#p0 = np.zeros([nwalkers,4+self.nplanets*6])
if self.n_ldparams == 2:
p0 = np.zeros([nwalkers,4+self.nplanets*6+1])
elif self.n_ldparams == 4:
p0 = np.zeros([nwalkers,6+self.nplanets*6+1])
rho = self.fit_sol[0]
zpt = self.fit_sol[1]
ld1 = self.fit_sol[2]
ld2 = self.fit_sol[3]
if self.n_ldparams == 4:
ld3 = self.fit_sol[4]
ld4 = self.fit_sol[5]
addval = 2
start,stop = (0.0 - ld3) / ld3_unc, (1.0 - ld3) / ld3_unc
p0[...,4] = truncnorm.rvs(start,stop
,loc=ld3,scale=ld3_unc,size=nwalkers)
start,stop = (0.0 - ld4) / ld4_unc, (1.0 - ld4) / ld4_unc
p0[...,5] = truncnorm.rvs(start,stop
,loc=ld4,scale=ld4_unc,size=nwalkers)
else:
addval = 0
start,stop = (0.0001 - rho) / rho_unc, (30.0 - rho) / rho_unc
p0[...,0] = truncnorm.rvs(start,stop
,loc=rho,scale=rho_unc,size=nwalkers)
#scatter the zero-point by its uncertainty (zpt_unc, otherwise unused above)
p0[...,1] = np.random.normal(loc=zpt,scale=zpt_unc,size=nwalkers)
start,stop = (0.0 - ld1) / ld1_unc, (1.0 - ld1) / ld1_unc
p0[...,2] = truncnorm.rvs(start,stop
,loc=ld1,scale=ld1_unc,size=nwalkers)
start,stop = (0.0 - ld2) / ld2_unc, (1.0 - ld2) / ld2_unc
p0[...,3] = truncnorm.rvs(start,stop
,loc=ld2,scale=ld2_unc,size=nwalkers)
for i in xrange(self.nplanets):
(T0,per,b,rprs,ecosw,
esinw) = self.fit_sol[i*6+4+addval:i*6 + 10+addval]
b = 0.2
ecosw = 0.0
esinw = 0.0
p0[...,i*6+4+addval] = np.random.normal(
T0,T0_unc,size=nwalkers)
p0[...,i*6+5+addval] = np.random.normal(
per,per_unc,size=nwalkers)
start,stop = (0.0 - b) / b_unc, (0.5 - b) / b_unc
p0[...,i*6+6+addval] = truncnorm.rvs(
start,stop
,loc=b,scale=b_unc,size=nwalkers)
start,stop = (0.0 - rprs) / rprs_unc, (0.5 - rprs) / rprs_unc
p0[...,i*6+7+addval] = truncnorm.rvs(
start,stop
,loc=rprs,scale=rprs_unc,size=nwalkers)
start,stop = (0.0 - ecosw) / ecosw_unc, (0.5 - ecosw) / ecosw_unc
p0[...,i*6+8+addval] = truncnorm.rvs(
start,stop
,loc=ecosw,scale=ecosw_unc,size=nwalkers)
start,stop = (0.0 - esinw) / esinw_unc, (0.5 - esinw) / esinw_unc
p0[...,i*6+9+addval] = truncnorm.rvs(
start,stop
,loc=esinw,scale=esinw_unc,size=nwalkers)
#this is the jitter term
#make it like self.err
errterm = np.median(self.err)
start,stop = 0.0,10.
p0[...,-1] = truncnorm.rvs(start,stop,
loc=0.0,scale=0.1*errterm,size=nwalkers)
return p0
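#Note on the truncated normals above: scipy.stats.truncnorm takes its bounds
#in units of the scale, relative to loc, i.e. a = (lower - loc)/scale and
#b = (upper - loc)/scale.  So, for example,
#  start,stop = (0.0 - ld1) / ld1_unc, (1.0 - ld1) / ld1_unc
#draws walker values of ld1 from a normal of width ld1_unc centred on ld1,
#truncated to the physical range [0, 1].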
def cut_non_transit(self,ntdur=10):
#make a mask for each planet candidate
self.onlytransits = True
tregion = np.zeros(self.nplanets)
maskdat = np.zeros([self.npt,self.nplanets],dtype=bool)
if self.n_ldparams == 2:
addval = 0
elif self.n_ldparams == 4:
addval = 2
for i in xrange(self.nplanets):
T0 = self.fit_sol[i*6 + 4+addval]
per = self.fit_sol[i*6 + 5+addval]
rho = self.fit_sol[0]
ars = self.get_ar(rho,per)
tdur_dys = (1./ars) * per * (1./np.pi)
#T0 is not necessarily the time of the first transit in the data,
#only the time of some transit, so walk it back until it is the
#first transit covered by the data
time0 = np.copy(T0)
while True:
if time0 - per < self.time[0]:
break
else:
time0 = time0 - per
ntransits = int((self.time[-1] - self.time[0]) / per) + 1
t_times = np.arange(ntransits)*per + T0
#make sure the first and last transit are not excluded even if
#partially in the data
t_times = np.r_[t_times,t_times[0] - per,t_times[-1] + per]
for j in t_times:
maskdat[:,i] = np.logical_or(maskdat[:,i],
np.logical_and(
self.time < j +tdur_dys*ntdur,
self.time > j - tdur_dys*ntdur) )
tregion[i] = ntdur*tdur_dys
#create a final mask that is the OR of the
#individual masks
finmask = np.zeros(self.npt)
for i in xrange(self.nplanets):
finmask = np.logical_or(finmask,maskdat[:,i])
self.time = self.time[finmask]
self.flux = self.flux[finmask]
self.err = self.err[finmask]
self._itime = self._itime[finmask]
self._datatype = self._datatype[finmask]
self.tregion = tregion
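#cut_non_transit keeps only the data points that lie within ntdur transit
#durations of a predicted transit centre of any planet, which greatly reduces
#the number of points evaluated in the likelihood; tregion records that
#half-width (in days) for each planet.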
def get_ar(rho,period):
""" gets a/R* from period and mean stellar density"""
G = 6.67E-11
rho_SI = rho * 1000.
tpi = 3. * np.pi
period_s = period * 86400.
part1 = period_s**2 * G * rho_SI
ar = (part1 / tpi)**(1./3.)
return ar
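#get_ar is Kepler's third law rearranged for the scaled semi-major axis,
#  a/R* = (G * rho * P**2 / (3*pi))**(1/3)
#with rho in g/cc and P in days.  As a sanity check, rho = 1.41 g/cc and
#P = 365.25 d (the Sun and Earth) give a/R* ~ 215, i.e. 1 AU in solar radii.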
def logchi2(fitsol,nplanets,rho_0,rho_0_unc,rho_prior,
flux,err,fixed_sol,time,itime,ntt,tobs,omc,datatype,
onlytransits=False,tregion=0.0):
#here are some priors to keep values sensible
rho = fitsol[0]
if rho < 0.0001 or rho > 100.:
return -np.inf
rprs = fitsol[np.arange(nplanets)*6 + 5]
if np.any(rprs < 0.) or np.any(rprs > 0.5):
return -np.inf
ecosw = fitsol[np.arange(nplanets)*6 + 6]
if np.any(ecosw < -1.0) or np.any(ecosw > 1.0):
return -np.inf
esinw = fitsol[np.arange(nplanets)*6 + 7]
if np.any(esinw < -1.0) or np.any(esinw > 1.0):
return -np.inf
#reject unbound orbits (ecc > 1)
ecc = np.sqrt(esinw**2 + ecosw**2)
if np.any(ecc > 1.0):
return -np.inf
#avoid orbits where the planet enters the star
per = fitsol[np.arange(nplanets)*6 + 3]
ar = get_ar(rho,per)
if np.any(ecc > (1.-(1./ar))):
return -np.inf
b = fitsol[np.arange(nplanets)*6 + 4]
if np.any(b < 0.) or np.any(b > 1.0 + rprs):
return -np.inf
if onlytransits:
T0 = fitsol[np.arange(nplanets)*6 + 2]
if np.any(T0 < T0 - tregion) or np.any(T0 > T0 + tregion):
return -np.inf
model_lc = calc_model(fitsol,nplanets,fixed_sol,
time,itime,ntt,tobs,omc,datatype)
if rho_prior:
chi2prior = (rho_0 - rho)**2 / rho_0_unc**2
else:
chi2prior = 0.0
ecc[ecc == 0.0] = 1.E-10
chi2ecc = np.log(1. / ecc)
chi2val = np.sum((model_lc - flux)**2 / err**2)
chi2const = np.log(1. / (np.sqrt(2.*np.pi) * np.mean(err)))
#the Gaussian density prior enters the log-probability with a factor of -1/2
chi2tot = (-chi2val/2.) - (chi2prior/2.)
#include eccentricity in the prior:
#adding np.sum(chi2ecc) = sum(log(1/ecc)) multiplies the posterior by 1/ecc for each planet
logp = chi2tot + np.sum(chi2ecc)
return logp
def calc_model(fitsol,nplanets,fixed_sol,time,itime,ntt,tobs,omc,datatype):
sol = np.zeros([8 + 10*nplanets])
rho = fitsol[0]
zpt = fitsol[1]
ld1,ld2,ld3,ld4 = fixed_sol[0:4]
dil = fixed_sol[4]
veloffset = fixed_sol[5]
fixed_stuff = fixed_sol[6:10]
sol[0:8] = np.array([rho,ld1,ld2,ld3,ld4,
dil,veloffset,zpt])
for i in xrange(nplanets):
sol[8+(i*10):8+(i*10)+10] = np.r_[fitsol[2+i*6:8+i*6],fixed_stuff]
tmodout = tmod.transitmodel(nplanets,sol,time,itime,
ntt,tobs,omc,datatype)
return tmodout - 1.
def logchi2_paramprior(fitsol,nplanets,rho_0,rho_0_unc,rho_prior,
teff_0,teff_0_unc,logg_0,logg_0_unc,feh_0,feh_0_unc,
flux,err,fixed_sol,time,itime,ntt,tobs,omc,datatype,
n_ldparams=2,ldfileloc='/Users/tom/svn_code/tom_code/',
onlytransits=False,tregion=0.0):
minf = -np.inf
#here are some priors to keep values sensible
rho = fitsol[0]
if rho < 1.E-6 or rho > 100.:
return minf
teff = fitsol[2]
if teff < 3500 or teff > 50000.:
return minf
logg = fitsol[3]
if logg < 0.0 or logg > 5.:
return minf
feh = fitsol[4]
if feh < -5. or feh > 1.:
return minf
rprs = fitsol[np.arange(nplanets)*6 + 8]
if np.any(rprs < 0.) or np.any(rprs > 0.5):
return minf
ecosw = fitsol[np.arange(nplanets)*6 + 9]
if np.any(ecosw < -1.0) or np.any(ecosw > 1.0):
return minf
esinw = fitsol[np.arange(nplanets)*6 + 10]
if np.any(esinw < -1.0) or np.any(esinw > 1.0):
return minf
#reject unbound orbits (ecc > 1)
ecc = np.sqrt(esinw**2 + ecosw**2)
if np.any(ecc > 1.0):
return minf
#avoid orbits where the planet enters the star
per = fitsol[np.arange(nplanets)*6 + 6]
ar = get_ar(rho,per)
if np.any(ecc > (1.-(1./ar))):
return minf
b = fitsol[np.arange(nplanets)*6 + 7]
if np.any(b < 0.) or np.any(b > 1.0 + rprs):
return minf
if onlytransits:
T0 = fitsol[np.arange(nplanets)*6 + 5]
if np.any(T0 < T0 - tregion) or np.any(T0 > T0 + tregion):
return minf
#calculate the limb darkening here
if n_ldparams == 2:
#if teff < 3500 and logg >= 3.5:
if False:
#this block should never run
ldfile = ldfileloc + 'claret-quad-phoenix.txt'
ld1,ld2 = ld_quad(ldfile,
teff,logg)
ld3 = 0.0
ld4 = 0.0
#elif logg < 3.5 or teff >= 3500:
if True:
ldfile = ldfileloc + 'claret-limb-quad.txt'
ld1,ld2 = claretquad(ldfile,
teff,logg,feh)
ld3 = 0.0
ld4 = 0.0
elif n_ldparams == 4:
ldfile = ldfileloc + 'claret-limb.txt'
ld1,ld2,ld3,ld4 = claretlimb4p(ldfile,
teff,logg,feh)
lds = np.array([ld1,ld2,ld3,ld4])
fitsol_model_calc = np.r_[fitsol[0:2],fitsol[5:]]
fixed_sol_model_calc = np.r_[lds,fixed_sol]
model_lc = calc_model(fitsol_model_calc,nplanets,fixed_sol_model_calc,
time,itime,ntt,tobs,omc,datatype)
if rho_prior:
rho_prior = (rho_0 - rho)**2 / rho_0_unc**2
#teff_prior = (teff_0 - teff)**2 / teff_0_unc**2
#logg_prior = (logg_0 - logg)**2 / logg_0_unc**2
#feh_prior = (feh_0 - feh)**2 / feh_0_unc**2
#chi2prior = rho_prior+teff_prior+logg_prior+feh_prior
else:
rho_prior = 0.0
teff_prior = (teff_0 - teff)**2 / teff_0_unc**2
logg_prior = (logg_0 - logg)**2 / logg_0_unc**2
feh_prior = (feh_0 - feh)**2 / feh_0_unc**2
chi2prior = -0.5*(rho_prior+teff_prior+logg_prior+feh_prior)
ecc[ecc == 0.0] = 1.E-10
chi2ecc = np.log(1. / ecc)
chi2val = -0.5*np.sum(((model_lc - flux)* (model_lc - flux))
/ (err*err))
#chi2const = np.log(np.sum(1./(np.sqrt(2.*np.pi)*err)))
chi2const = 0.0
chi2tot = chi2const + chi2val + chi2prior
#include eccentricity in the prior:
#adding np.sum(chi2ecc) = sum(log(1/ecc)) multiplies the posterior by 1/ecc for each planet
logp = chi2tot + np.sum(chi2ecc)
return logp
def logchi2_fitldp(fitsol,nplanets,rho_0,rho_0_unc,rho_prior,
ld1_0,ld1_0_unc,ld2_0,ld2_0_unc,ldp_prior,
flux,err,fixed_sol,time,itime,ntt,tobs,omc,datatype,
n_ldparams=2,ldfileloc='/Users/tom/svn_code/tom_code/',
onlytransits=False,tregion=0.0):
minf = -np.inf
#here are some priors to keep values sensible
rho = fitsol[0]
if rho < 1.E-6 or rho > 100.:
return minf
ld1 = fitsol[2]
ld2 = fitsol[3]
#some limb darkening constraints
#from Burke et al. 2008 (XO-2b)
if ld1 < 0.0:
return minf
if ld1 + ld2 > 1.0:
return minf
if ld1 + 2.*ld2 < 0.0:
return minf
if ld2 < -0.8:
return minf
if n_ldparams == 2:
ld3, ld4 = 0.0,0.0
addval = 0
elif n_ldparams == 4:
ld3 = fitsol[4]
ld4 = fitsol[5]
addval = 2
rprs = fitsol[np.arange(nplanets)*6 + 7 + addval]
if np.any(rprs < 0.) or np.any(rprs > 0.5):
return minf
ecosw = fitsol[np.arange(nplanets)*6 + 8+addval]
if np.any(ecosw < -1.0) or np.any(ecosw > 1.0):
return minf
esinw = fitsol[np.arange(nplanets)*6 + 9+addval]
if np.any(esinw < -1.0) or np.any(esinw > 1.0):
return minf
#reject unbound orbits (ecc > 1)
ecc = np.sqrt(esinw**2 + ecosw**2)
if np.any(ecc > 1.0):
return minf
#avoid orbits where the planet enters the star
per = fitsol[np.arange(nplanets)*6 + 5+addval]
ar = get_ar(rho,per)
if np.any(ecc > (1.-(1./ar))):
return minf
b = fitsol[np.arange(nplanets)*6 + 6+addval]
if np.any(b < 0.) or np.any(b > 1.0 + rprs):
return minf
if onlytransits:
T0 = fitsol[np.arange(nplanets)*6 + 4+addval]
if np.any(T0 < T0 - tregion) or np.any(T0 > T0 + tregion):
return minf
jitter = fitsol[-1]
if jitter < 0.0:
return minf
err_jit = np.sqrt(err**2 + jitter**2)
err_jit2 = err**2 + jitter**2
lds = np.array([ld1,ld2,ld3,ld4])
fitsol_model_calc = np.r_[fitsol[0:2],fitsol[4:]]
fixed_sol_model_calc = np.r_[lds,fixed_sol]
model_lc = calc_model(fitsol_model_calc,nplanets,fixed_sol_model_calc,
time,itime,ntt,tobs,omc,datatype)
# if rho_prior:
# rhoprior = (rho_0 - rho)**2 / rho_0_unc**2
# else:
# rhoprior = 0.0
# if ldp_prior:
# ldprior1 = (ld1_0 - ld1)*(ld1_0 - ld1) / ld1_0_unc**2
# ldprior2 = (ld2_0 - ld2)*(ld2_0 - ld2) / ld2_0_unc**2
# ldprior = ldprior1 + ldprior2
# else:
# ldprior = 0.0
# chi2prior = -0.5*(rhoprior+ldprior)
ecc[ecc == 0.0] = 1.E-10
#chi2ecc = np.log(1. / ecc)
# chi2val = -0.5*np.sum(((model_lc - flux)* (model_lc - flux))
# / (err_jit*err_jit))
# chi2const = -1.0*np.sum(np.log(err_jit))
# #chi2const = 0.0
# chi2tot = chi2const + chi2val + chi2prior
# #include eccentricity in the prior
# #having np.log(chi2ecc) -> e**(-chi2/2) / ecc
# logp = chi2tot + np.sum(chi2ecc)
npt_lc = len(err_jit)
loglc = (
- (npt_lc/2.)*np.log(2.*np.pi)
- 0.5 * np.sum(np.log(err_jit2))
- 0.5 * np.sum((model_lc - flux)**2 / err_jit2)
)
if rho_prior:
logrho = (
- 0.5 * np.log(2.*np.pi)
- 0.5 * np.log(rho_0_unc**2)
- 0.5 * (rho_0 - rho)**2 / rho_0_unc**2
)
else:
logrho = 0.0
if ldp_prior:
logld1 = (
- 0.5 * np.log(2.*np.pi)
- 0.5 * np.log(ld1_0_unc**2)
- 0.5 * (ld1_0 - ld1)**2 / ld1_0_unc**2
)
logld2 = (
- 0.5 * np.log(2.*np.pi)
- 0.5 * np.log(ld2_0_unc**2)
- 0.5 * (ld2_0 - ld2)**2 / ld2_0_unc**2
)
logldp = logld1 + logld2
else:
logldp = 0.0
logecc = - np.sum(np.log(ecc))
logLtot = loglc + logrho + logldp + logecc
return logLtot
# def calc_model_paramprior(fitsol,nplanets,fixed_sol,time,itime,ntt,tobs,omc,datatype):
# sol = np.zeros([8 + 10*nplanets])
# rho = fitsol[0]
# zpt = fitsol[1]
# ld1,ld2,ld3,ld4 = fixed_sol[0:4]
# dil = fixed_sol[4]
# veloffset = fixed_sol[5]
# fixed_stuff = fixed_sol[6:10]
# sol[0:8] = np.array([rho,ld1,ld2,ld3,ld4,
# dil,veloffset,zpt])
# for i in xrange(nplanets):
# sol[8+(i*10):8+(i*10)+10] = np.r_[fitsol[2+i*6:8+i*6],fixed_stuff]
# tmodout = tmod.transitmodel(nplanets,sol,time,itime,
# ntt,tobs,omc,datatype)
# return tmodout - 1.
def get_stats(par_arr,noprint=False):
"""return the median, standard deviation and 1/2/3-sigma credible intervals of par_arr"""
onesig = (1. - 0.682689492) / 2.
twosig = (1. - 0.954499736) / 2.
threesig = (1. - 0.997300204) / 2.
med = np.median(par_arr)
stdev = np.std(par_arr)
sort_arr = np.sort(par_arr)
nval = len(par_arr)
m1 = med - sort_arr[int(np.floor(onesig * nval))]
p1 = sort_arr[int(np.floor(nval - (onesig * nval)))] - med
m2 = med - sort_arr[int(np.floor(twosig * nval))]
p2 = sort_arr[int(np.floor(nval - (twosig * nval)))] - med
m3 = med - sort_arr[int(np.floor(threesig * nval))]
p3 = sort_arr[int(np.floor(nval - (threesig * nval)))] - med
ninefivelow = sort_arr[int(np.floor(0.025*nval))]
ninefivehigh = sort_arr[int(np.floor(0.975*nval))]
if not noprint:
print '95 percent credible interval = %s - %s' %(ninefivelow,ninefivehigh)
return np.array([med,stdev,p1,m1,p2,m2,p3,m3])
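#For illustration (assuming `chain` is an emcee chain of shape
#[nwalkers, nsteps, ndim]), per-parameter summaries could be built as
#  flat = chain.reshape(-1, chain.shape[-1])
#  med, stdev, p1, m1, p2, m2, p3, m3 = get_stats(flat[:,0], noprint=True)
#where the p/m pairs are the upper/lower 1-, 2- and 3-sigma half-widths about the median.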
def model_real_paramprior(rho,zpt,teff,logg,feh,T0,
per,b,rprs,ecosw,esinw,
time,itime,ntt,tobs,omc,datatype,
n_ldparams=2,
ldfileloc='/Users/tom/svn_code/tom_code/'):
ldfile = ldfileloc + 'claret-limb-quad.txt'
ld1,ld2 = claretquad(ldfile,teff,logg,feh)
ld3 = 0.0
ld4 = 0.0
dil=0.0
veloffset = 0.0
rvamp = 0.0
occ = 0.0
ell = 0.0
alb = 0.0
nplanets = 1
sol = np.array([rho,ld1,ld2,ld3,ld4,
dil,veloffset,zpt,T0,per,b,rprs,ecosw,esinw,
rvamp,occ,ell,alb])
tmodout = tmod.transitmodel(nplanets,sol,time,itime,
ntt,tobs,omc,datatype) - 1.0
return tmodout
def testtom(t,num):
rho,zpt,teff,logg,feh,T0,per,b,rprs,ecosw,esinw = (t[...,num])
mod = model_real_paramprior(rho,zpt,teff,logg,feh,T0,per,b,rprs,ecosw,
esinw,M.time,M._itime,M._ntt,M._tobs,M._omc,M._datatype,
n_ldparams=2,ldfileloc='/Users/tom/svn_code/tom_code/')
q,f = get_qf(M.time,mod,per,T0)
plt.plot(q,f,alpha=0.5)
def run_crap(t):
for num in np.random.choice(np.arange(len(t[1])),size=10):
testtom(t,num)
q,f = get_qf(M.time,M.flux,per,T0)
plt.scatter(q,f,s=1,color='k',alpha=0.2)
def get_qf(time,flux,period,epoch):
date1 = (time - epoch) + 0.5*period
phi1 = (((date1 / period) - np.floor(date1/period)) * 24. * period) - 12*period
q1 = np.sort(phi1)
f1 = (flux[np.argsort(phi1)]) * 1.E6
return q1, f1
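#get_qf phase-folds a light curve on (period, epoch): q1 is the time from
#mid-transit in hours (the 0.5*period offset places the transit at q1 = 0)
#and f1 is the flux multiplied by 1e6 (ppm, if the input is a zero-centred
#relative flux).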
|
mit
|
SSG-DRD-IOT/commercial-iot-security-system
|
opencv/tutorials/imageProcessing/smoothing/convolution.py
|
1
|
1116
|
"""
Smoothing images
blur images with various low pass filters
apply custom-made filters to images (2D Convolution)
2D Convolution (image filtering)
images can be filtered with various low-pass filters (LPF), high-pass filters (HPF), ...
LPFs help remove noise and blur images
HPFs help find edges in images
(note: cv2.filter2D actually computes correlation, not true convolution, because the kernel is not flipped)
function:
cv2.filter2D() - convolve kernel with an image
"""
# 5x5 averaging filter kernel:
# K = (1/25) [1 1 1 1 1;
# 1 1 1 1 1;
# 1 1 1 1 1;
# 1 1 1 1 1;
# 1 1 1 1 1]
# operation: center the kernel on a pixel, add up the 25 pixels under the kernel,
# take the average, and replace the central pixel with that average
# the operation is repeated for every pixel in the image
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('opencv_logo.png')
kernel = np.ones((5,5), np.float32)/25
dst = cv2.filter2D(img, -1, kernel)
plt.subplot(121), plt.imshow(img), plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(dst), plt.title('Averaging')
plt.xticks([]), plt.yticks([])
plt.show()
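# a minimal sketch of the same operation using OpenCV's built-in normalized box
# filter; cv2.blur should give the same result as the manual 5x5 averaging kernel
blurred = cv2.blur(img, (5, 5))
print(blurred.shape == dst.shape)  # same image size, comparable pixel values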
|
mit
|
WindCanDie/spark
|
python/pyspark/sql/types.py
|
2
|
67075
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import decimal
import time
import datetime
import calendar
import json
import re
import base64
from array import array
import ctypes
if sys.version >= "3":
long = int
basestring = unicode = str
from py4j.protocol import register_input_converter
from py4j.java_gateway import JavaClass
from pyspark import SparkContext
from pyspark.serializers import CloudPickleSerializer
__all__ = [
"DataType", "NullType", "StringType", "BinaryType", "BooleanType", "DateType",
"TimestampType", "DecimalType", "DoubleType", "FloatType", "ByteType", "IntegerType",
"LongType", "ShortType", "ArrayType", "MapType", "StructField", "StructType"]
class DataType(object):
"""Base class for data types."""
def __repr__(self):
return self.__class__.__name__
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def typeName(cls):
return cls.__name__[:-4].lower()
def simpleString(self):
return self.typeName()
def jsonValue(self):
return self.typeName()
def json(self):
return json.dumps(self.jsonValue(),
separators=(',', ':'),
sort_keys=True)
def needConversion(self):
"""
Does this type need conversion between Python object and internal SQL object.
This is used to avoid the unnecessary conversion for ArrayType/MapType/StructType.
"""
return False
def toInternal(self, obj):
"""
Converts a Python object into an internal SQL object.
"""
return obj
def fromInternal(self, obj):
"""
Converts an internal SQL object into a native Python object.
"""
return obj
# This singleton pattern does not work with pickle, you will get
# another object after pickle and unpickle
class DataTypeSingleton(type):
"""Metaclass for DataType"""
_instances = {}
def __call__(cls):
if cls not in cls._instances:
cls._instances[cls] = super(DataTypeSingleton, cls).__call__()
return cls._instances[cls]
class NullType(DataType):
"""Null type.
The data type representing None, used for the types that cannot be inferred.
"""
__metaclass__ = DataTypeSingleton
class AtomicType(DataType):
"""An internal type used to represent everything that is not
null, UDTs, arrays, structs, and maps."""
class NumericType(AtomicType):
"""Numeric data types.
"""
class IntegralType(NumericType):
"""Integral data types.
"""
__metaclass__ = DataTypeSingleton
class FractionalType(NumericType):
"""Fractional data types.
"""
class StringType(AtomicType):
"""String data type.
"""
__metaclass__ = DataTypeSingleton
class BinaryType(AtomicType):
"""Binary (byte array) data type.
"""
__metaclass__ = DataTypeSingleton
class BooleanType(AtomicType):
"""Boolean data type.
"""
__metaclass__ = DataTypeSingleton
class DateType(AtomicType):
"""Date (datetime.date) data type.
"""
__metaclass__ = DataTypeSingleton
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
def needConversion(self):
return True
def toInternal(self, d):
if d is not None:
return d.toordinal() - self.EPOCH_ORDINAL
def fromInternal(self, v):
if v is not None:
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
class TimestampType(AtomicType):
"""Timestamp (datetime.datetime) data type.
"""
__metaclass__ = DataTypeSingleton
def needConversion(self):
return True
def toInternal(self, dt):
if dt is not None:
seconds = (calendar.timegm(dt.utctimetuple()) if dt.tzinfo
else time.mktime(dt.timetuple()))
return int(seconds) * 1000000 + dt.microsecond
def fromInternal(self, ts):
if ts is not None:
# using int to avoid precision loss in float
return datetime.datetime.fromtimestamp(ts // 1000000).replace(microsecond=ts % 1000000)
class DecimalType(FractionalType):
"""Decimal (decimal.Decimal) data type.
The DecimalType must have fixed precision (the maximum total number of digits)
and scale (the number of digits on the right of dot). For example, (5, 2) can
support the value from [-999.99 to 999.99].
The precision can be up to 38; the scale must be less than or equal to the precision.
When creating a DecimalType, the default precision and scale is (10, 0). When inferring
schema from decimal.Decimal objects, it will be DecimalType(38, 18).
:param precision: the maximum total number of digits (default: 10)
:param scale: the number of digits on right side of dot. (default: 0)
"""
def __init__(self, precision=10, scale=0):
self.precision = precision
self.scale = scale
self.hasPrecisionInfo = True # this is public API
def simpleString(self):
return "decimal(%d,%d)" % (self.precision, self.scale)
def jsonValue(self):
return "decimal(%d,%d)" % (self.precision, self.scale)
def __repr__(self):
return "DecimalType(%d,%d)" % (self.precision, self.scale)
class DoubleType(FractionalType):
"""Double data type, representing double precision floats.
"""
__metaclass__ = DataTypeSingleton
class FloatType(FractionalType):
"""Float data type, representing single precision floats.
"""
__metaclass__ = DataTypeSingleton
class ByteType(IntegralType):
"""Byte data type, i.e. a signed integer in a single byte.
"""
def simpleString(self):
return 'tinyint'
class IntegerType(IntegralType):
"""Int data type, i.e. a signed 32-bit integer.
"""
def simpleString(self):
return 'int'
class LongType(IntegralType):
"""Long data type, i.e. a signed 64-bit integer.
If the values are beyond the range of [-9223372036854775808, 9223372036854775807],
please use :class:`DecimalType`.
"""
def simpleString(self):
return 'bigint'
class ShortType(IntegralType):
"""Short data type, i.e. a signed 16-bit integer.
"""
def simpleString(self):
return 'smallint'
class ArrayType(DataType):
"""Array data type.
:param elementType: :class:`DataType` of each element in the array.
:param containsNull: boolean, whether the array can contain null (None) values.
"""
def __init__(self, elementType, containsNull=True):
"""
>>> ArrayType(StringType()) == ArrayType(StringType(), True)
True
>>> ArrayType(StringType(), False) == ArrayType(StringType())
False
"""
assert isinstance(elementType, DataType),\
"elementType %s should be an instance of %s" % (elementType, DataType)
self.elementType = elementType
self.containsNull = containsNull
def simpleString(self):
return 'array<%s>' % self.elementType.simpleString()
def __repr__(self):
return "ArrayType(%s,%s)" % (self.elementType,
str(self.containsNull).lower())
def jsonValue(self):
return {"type": self.typeName(),
"elementType": self.elementType.jsonValue(),
"containsNull": self.containsNull}
@classmethod
def fromJson(cls, json):
return ArrayType(_parse_datatype_json_value(json["elementType"]),
json["containsNull"])
def needConversion(self):
return self.elementType.needConversion()
def toInternal(self, obj):
if not self.needConversion():
return obj
return obj and [self.elementType.toInternal(v) for v in obj]
def fromInternal(self, obj):
if not self.needConversion():
return obj
return obj and [self.elementType.fromInternal(v) for v in obj]
class MapType(DataType):
"""Map data type.
:param keyType: :class:`DataType` of the keys in the map.
:param valueType: :class:`DataType` of the values in the map.
:param valueContainsNull: indicates whether values can contain null (None) values.
Keys in a map data type are not allowed to be null (None).
"""
def __init__(self, keyType, valueType, valueContainsNull=True):
"""
>>> (MapType(StringType(), IntegerType())
... == MapType(StringType(), IntegerType(), True))
True
>>> (MapType(StringType(), IntegerType(), False)
... == MapType(StringType(), FloatType()))
False
"""
assert isinstance(keyType, DataType),\
"keyType %s should be an instance of %s" % (keyType, DataType)
assert isinstance(valueType, DataType),\
"valueType %s should be an instance of %s" % (valueType, DataType)
self.keyType = keyType
self.valueType = valueType
self.valueContainsNull = valueContainsNull
def simpleString(self):
return 'map<%s,%s>' % (self.keyType.simpleString(), self.valueType.simpleString())
def __repr__(self):
return "MapType(%s,%s,%s)" % (self.keyType, self.valueType,
str(self.valueContainsNull).lower())
def jsonValue(self):
return {"type": self.typeName(),
"keyType": self.keyType.jsonValue(),
"valueType": self.valueType.jsonValue(),
"valueContainsNull": self.valueContainsNull}
@classmethod
def fromJson(cls, json):
return MapType(_parse_datatype_json_value(json["keyType"]),
_parse_datatype_json_value(json["valueType"]),
json["valueContainsNull"])
def needConversion(self):
return self.keyType.needConversion() or self.valueType.needConversion()
def toInternal(self, obj):
if not self.needConversion():
return obj
return obj and dict((self.keyType.toInternal(k), self.valueType.toInternal(v))
for k, v in obj.items())
def fromInternal(self, obj):
if not self.needConversion():
return obj
return obj and dict((self.keyType.fromInternal(k), self.valueType.fromInternal(v))
for k, v in obj.items())
class StructField(DataType):
"""A field in :class:`StructType`.
:param name: string, name of the field.
:param dataType: :class:`DataType` of the field.
:param nullable: boolean, whether the field can be null (None) or not.
:param metadata: a dict from string to simple type that can be converted to JSON automatically
"""
def __init__(self, name, dataType, nullable=True, metadata=None):
"""
>>> (StructField("f1", StringType(), True)
... == StructField("f1", StringType(), True))
True
>>> (StructField("f1", StringType(), True)
... == StructField("f2", StringType(), True))
False
"""
assert isinstance(dataType, DataType),\
"dataType %s should be an instance of %s" % (dataType, DataType)
assert isinstance(name, basestring), "field name %s should be string" % (name)
if not isinstance(name, str):
name = name.encode('utf-8')
self.name = name
self.dataType = dataType
self.nullable = nullable
self.metadata = metadata or {}
def simpleString(self):
return '%s:%s' % (self.name, self.dataType.simpleString())
def __repr__(self):
return "StructField(%s,%s,%s)" % (self.name, self.dataType,
str(self.nullable).lower())
def jsonValue(self):
return {"name": self.name,
"type": self.dataType.jsonValue(),
"nullable": self.nullable,
"metadata": self.metadata}
@classmethod
def fromJson(cls, json):
return StructField(json["name"],
_parse_datatype_json_value(json["type"]),
json["nullable"],
json["metadata"])
def needConversion(self):
return self.dataType.needConversion()
def toInternal(self, obj):
return self.dataType.toInternal(obj)
def fromInternal(self, obj):
return self.dataType.fromInternal(obj)
def typeName(self):
raise TypeError(
"StructField does not have typeName. "
"Use typeName on its type explicitly instead.")
class StructType(DataType):
"""Struct type, consisting of a list of :class:`StructField`.
This is the data type representing a :class:`Row`.
Iterating a :class:`StructType` will iterate its :class:`StructField`\\s.
A contained :class:`StructField` can be accessed by name or position.
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct1["f1"]
StructField(f1,StringType,true)
>>> struct1[0]
StructField(f1,StringType,true)
"""
def __init__(self, fields=None):
"""
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct2 = StructType([StructField("f1", StringType(), True),
... StructField("f2", IntegerType(), False)])
>>> struct1 == struct2
False
"""
if not fields:
self.fields = []
self.names = []
else:
self.fields = fields
self.names = [f.name for f in fields]
assert all(isinstance(f, StructField) for f in fields),\
"fields should be a list of StructField"
# Precalculated list of fields that need conversion with fromInternal/toInternal functions
self._needConversion = [f.needConversion() for f in self]
self._needSerializeAnyField = any(self._needConversion)
def add(self, field, data_type=None, nullable=True, metadata=None):
"""
Construct a StructType by adding new elements to it to define the schema. The method accepts
either:
a) A single parameter which is a StructField object.
b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
metadata(optional). The data_type parameter may be either a String or a
DataType object.
>>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
>>> struct2 = StructType([StructField("f1", StringType(), True), \\
... StructField("f2", StringType(), True, None)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add(StructField("f1", StringType(), True))
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add("f1", "string", True)
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
:param field: Either the name of the field or a StructField object
:param data_type: If present, the DataType of the StructField to create
:param nullable: Whether the field to add should be nullable (default True)
:param metadata: Any additional metadata (default None)
:return: a new updated StructType
"""
if isinstance(field, StructField):
self.fields.append(field)
self.names.append(field.name)
else:
if isinstance(field, str) and data_type is None:
raise ValueError("Must specify DataType if passing name of struct_field to create.")
if isinstance(data_type, str):
data_type_f = _parse_datatype_json_value(data_type)
else:
data_type_f = data_type
self.fields.append(StructField(field, data_type_f, nullable, metadata))
self.names.append(field)
# Precalculated list of fields that need conversion with fromInternal/toInternal functions
self._needConversion = [f.needConversion() for f in self]
self._needSerializeAnyField = any(self._needConversion)
return self
def __iter__(self):
"""Iterate the fields"""
return iter(self.fields)
def __len__(self):
"""Return the number of fields."""
return len(self.fields)
def __getitem__(self, key):
"""Access fields by name or slice."""
if isinstance(key, str):
for field in self:
if field.name == key:
return field
raise KeyError('No StructField named {0}'.format(key))
elif isinstance(key, int):
try:
return self.fields[key]
except IndexError:
raise IndexError('StructType index out of range')
elif isinstance(key, slice):
return StructType(self.fields[key])
else:
raise TypeError('StructType keys should be strings, integers or slices')
def simpleString(self):
return 'struct<%s>' % (','.join(f.simpleString() for f in self))
def __repr__(self):
return ("StructType(List(%s))" %
",".join(str(field) for field in self))
def jsonValue(self):
return {"type": self.typeName(),
"fields": [f.jsonValue() for f in self]}
@classmethod
def fromJson(cls, json):
return StructType([StructField.fromJson(f) for f in json["fields"]])
def fieldNames(self):
"""
Returns all field names in a list.
>>> struct = StructType([StructField("f1", StringType(), True)])
>>> struct.fieldNames()
['f1']
"""
return list(self.names)
def needConversion(self):
# We need to convert Row()/namedtuple into tuple()
return True
def toInternal(self, obj):
if obj is None:
return
if self._needSerializeAnyField:
# Only calling toInternal function for fields that need conversion
if isinstance(obj, dict):
return tuple(f.toInternal(obj.get(n)) if c else obj.get(n)
for n, f, c in zip(self.names, self.fields, self._needConversion))
elif isinstance(obj, (tuple, list)):
return tuple(f.toInternal(v) if c else v
for f, v, c in zip(self.fields, obj, self._needConversion))
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return tuple(f.toInternal(d.get(n)) if c else d.get(n)
for n, f, c in zip(self.names, self.fields, self._needConversion))
else:
raise ValueError("Unexpected tuple %r with StructType" % obj)
else:
if isinstance(obj, dict):
return tuple(obj.get(n) for n in self.names)
elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
return tuple(obj[n] for n in self.names)
elif isinstance(obj, (list, tuple)):
return tuple(obj)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return tuple(d.get(n) for n in self.names)
else:
raise ValueError("Unexpected tuple %r with StructType" % obj)
def fromInternal(self, obj):
if obj is None:
return
if isinstance(obj, Row):
# it's already converted by pickler
return obj
if self._needSerializeAnyField:
# Only calling fromInternal function for fields that need conversion
values = [f.fromInternal(v) if c else v
for f, v, c in zip(self.fields, obj, self._needConversion)]
else:
values = obj
return _create_row(self.names, values)
class UserDefinedType(DataType):
"""User-defined type (UDT).
.. note:: WARN: Spark Internal Use Only
"""
@classmethod
def typeName(cls):
return cls.__name__.lower()
@classmethod
def sqlType(cls):
"""
Underlying SQL storage type for this UDT.
"""
raise NotImplementedError("UDT must implement sqlType().")
@classmethod
def module(cls):
"""
The Python module of the UDT.
"""
raise NotImplementedError("UDT must implement module().")
@classmethod
def scalaUDT(cls):
"""
The class name of the paired Scala UDT (could be '', if there
is no corresponding one).
"""
return ''
def needConversion(self):
return True
@classmethod
def _cachedSqlType(cls):
"""
Cache the sqlType() into the class, because it's heavily used in `toInternal`.
"""
if not hasattr(cls, "_cached_sql_type"):
cls._cached_sql_type = cls.sqlType()
return cls._cached_sql_type
def toInternal(self, obj):
if obj is not None:
return self._cachedSqlType().toInternal(self.serialize(obj))
def fromInternal(self, obj):
v = self._cachedSqlType().fromInternal(obj)
if v is not None:
return self.deserialize(v)
def serialize(self, obj):
"""
Converts a user-type object into a SQL datum.
"""
raise NotImplementedError("UDT must implement toInternal().")
def deserialize(self, datum):
"""
Converts a SQL datum into a user-type object.
"""
raise NotImplementedError("UDT must implement fromInternal().")
def simpleString(self):
return 'udt'
def json(self):
return json.dumps(self.jsonValue(), separators=(',', ':'), sort_keys=True)
def jsonValue(self):
if self.scalaUDT():
assert self.module() != '__main__', 'UDT in __main__ cannot work with ScalaUDT'
schema = {
"type": "udt",
"class": self.scalaUDT(),
"pyClass": "%s.%s" % (self.module(), type(self).__name__),
"sqlType": self.sqlType().jsonValue()
}
else:
ser = CloudPickleSerializer()
b = ser.dumps(type(self))
schema = {
"type": "udt",
"pyClass": "%s.%s" % (self.module(), type(self).__name__),
"serializedClass": base64.b64encode(b).decode('utf8'),
"sqlType": self.sqlType().jsonValue()
}
return schema
@classmethod
def fromJson(cls, json):
pyUDT = str(json["pyClass"]) # convert unicode to str
split = pyUDT.rfind(".")
pyModule = pyUDT[:split]
pyClass = pyUDT[split+1:]
m = __import__(pyModule, globals(), locals(), [pyClass])
if not hasattr(m, pyClass):
s = base64.b64decode(json['serializedClass'].encode('utf-8'))
UDT = CloudPickleSerializer().loads(s)
else:
UDT = getattr(m, pyClass)
return UDT()
def __eq__(self, other):
return type(self) == type(other)
_atomic_types = [StringType, BinaryType, BooleanType, DecimalType, FloatType, DoubleType,
ByteType, ShortType, IntegerType, LongType, DateType, TimestampType, NullType]
_all_atomic_types = dict((t.typeName(), t) for t in _atomic_types)
_all_complex_types = dict((v.typeName(), v)
for v in [ArrayType, MapType, StructType])
_FIXED_DECIMAL = re.compile(r"decimal\(\s*(\d+)\s*,\s*(-?\d+)\s*\)")
def _parse_datatype_string(s):
"""
Parses the given data type string to a :class:`DataType`. The data type string format equals
to :class:`DataType.simpleString`, except that top level struct type can omit
the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead
of ``tinyint`` for :class:`ByteType`. We can also use ``int`` as a short name
for :class:`IntegerType`. Since Spark 2.3, this also supports a schema in a DDL-formatted
string and case-insensitive strings.
>>> _parse_datatype_string("int ")
IntegerType
>>> _parse_datatype_string("INT ")
IntegerType
>>> _parse_datatype_string("a: byte, b: decimal( 16 , 8 ) ")
StructType(List(StructField(a,ByteType,true),StructField(b,DecimalType(16,8),true)))
>>> _parse_datatype_string("a DOUBLE, b STRING")
StructType(List(StructField(a,DoubleType,true),StructField(b,StringType,true)))
>>> _parse_datatype_string("a: array< short>")
StructType(List(StructField(a,ArrayType(ShortType,true),true)))
>>> _parse_datatype_string(" map<string , string > ")
MapType(StringType,StringType,true)
>>> # Error cases
>>> _parse_datatype_string("blabla") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("a: int,") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("array<int") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("map<int, boolean>>") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
"""
sc = SparkContext._active_spark_context
def from_ddl_schema(type_str):
return _parse_datatype_json_string(
sc._jvm.org.apache.spark.sql.types.StructType.fromDDL(type_str).json())
def from_ddl_datatype(type_str):
return _parse_datatype_json_string(
sc._jvm.org.apache.spark.sql.api.python.PythonSQLUtils.parseDataType(type_str).json())
try:
# DDL format, "fieldname datatype, fieldname datatype".
return from_ddl_schema(s)
except Exception as e:
try:
# For backwards compatibility, "integer", "struct<fieldname: datatype>" and etc.
return from_ddl_datatype(s)
except:
try:
# For backwards compatibility, "fieldname: datatype, fieldname: datatype" case.
return from_ddl_datatype("struct<%s>" % s.strip())
except:
raise e
def _parse_datatype_json_string(json_string):
"""Parses the given data type JSON string.
>>> import pickle
>>> def check_datatype(datatype):
... pickled = pickle.loads(pickle.dumps(datatype))
... assert datatype == pickled
... scala_datatype = spark._jsparkSession.parseDataType(datatype.json())
... python_datatype = _parse_datatype_json_string(scala_datatype.json())
... assert datatype == python_datatype
>>> for cls in _all_atomic_types.values():
... check_datatype(cls())
>>> # Simple ArrayType.
>>> simple_arraytype = ArrayType(StringType(), True)
>>> check_datatype(simple_arraytype)
>>> # Simple MapType.
>>> simple_maptype = MapType(StringType(), LongType())
>>> check_datatype(simple_maptype)
>>> # Simple StructType.
>>> simple_structtype = StructType([
... StructField("a", DecimalType(), False),
... StructField("b", BooleanType(), True),
... StructField("c", LongType(), True),
... StructField("d", BinaryType(), False)])
>>> check_datatype(simple_structtype)
>>> # Complex StructType.
>>> complex_structtype = StructType([
... StructField("simpleArray", simple_arraytype, True),
... StructField("simpleMap", simple_maptype, True),
... StructField("simpleStruct", simple_structtype, True),
... StructField("boolean", BooleanType(), False),
... StructField("withMeta", DoubleType(), False, {"name": "age"})])
>>> check_datatype(complex_structtype)
>>> # Complex ArrayType.
>>> complex_arraytype = ArrayType(complex_structtype, True)
>>> check_datatype(complex_arraytype)
>>> # Complex MapType.
>>> complex_maptype = MapType(complex_structtype,
... complex_arraytype, False)
>>> check_datatype(complex_maptype)
>>> # Decimal with negative scale.
>>> check_datatype(DecimalType(1,-1))
"""
return _parse_datatype_json_value(json.loads(json_string))
def _parse_datatype_json_value(json_value):
if not isinstance(json_value, dict):
if json_value in _all_atomic_types.keys():
return _all_atomic_types[json_value]()
elif json_value == 'decimal':
return DecimalType()
elif _FIXED_DECIMAL.match(json_value):
m = _FIXED_DECIMAL.match(json_value)
return DecimalType(int(m.group(1)), int(m.group(2)))
else:
raise ValueError("Could not parse datatype: %s" % json_value)
else:
tpe = json_value["type"]
if tpe in _all_complex_types:
return _all_complex_types[tpe].fromJson(json_value)
elif tpe == 'udt':
return UserDefinedType.fromJson(json_value)
else:
raise ValueError("not supported type: %s" % tpe)
# Mapping Python types to Spark SQL DataType
_type_mappings = {
type(None): NullType,
bool: BooleanType,
int: LongType,
float: DoubleType,
str: StringType,
bytearray: BinaryType,
decimal.Decimal: DecimalType,
datetime.date: DateType,
datetime.datetime: TimestampType,
datetime.time: TimestampType,
}
if sys.version < "3":
_type_mappings.update({
unicode: StringType,
long: LongType,
})
# Mapping Python array types to Spark SQL DataType
# We should be careful here. The size of these types in python depends on C
# implementation. We need to make sure that this conversion does not lose any
# precision. Also, the JVM only supports signed types; when converting unsigned types,
# keep in mind that they require 1 more bit when stored as signed types.
#
# Reference for C integer size, see:
# ISO/IEC 9899:201x specification, chapter 5.2.4.2.1 Sizes of integer types <limits.h>.
# Reference for python array typecode, see:
# https://docs.python.org/2/library/array.html
# https://docs.python.org/3.6/library/array.html
# Reference for JVM's supported integral types:
# http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-2.html#jvms-2.3.1
_array_signed_int_typecode_ctype_mappings = {
'b': ctypes.c_byte,
'h': ctypes.c_short,
'i': ctypes.c_int,
'l': ctypes.c_long,
}
_array_unsigned_int_typecode_ctype_mappings = {
'B': ctypes.c_ubyte,
'H': ctypes.c_ushort,
'I': ctypes.c_uint,
'L': ctypes.c_ulong
}
def _int_size_to_type(size):
"""
Return the Catalyst datatype from the size of integers.
"""
if size <= 8:
return ByteType
if size <= 16:
return ShortType
if size <= 32:
return IntegerType
if size <= 64:
return LongType
# The list of all supported array typecodes is stored here
_array_type_mappings = {
# Warning: The exact properties of float and double are not specified by the C standard.
# On almost every system supported by both python and the JVM, they are IEEE 754
# single-precision and IEEE 754 double-precision binary floating-point formats,
# and we assume the same thing here for now.
'f': FloatType,
'd': DoubleType
}
# compute array typecode mappings for signed integer types
for _typecode in _array_signed_int_typecode_ctype_mappings.keys():
size = ctypes.sizeof(_array_signed_int_typecode_ctype_mappings[_typecode]) * 8
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
# compute array typecode mappings for unsigned integer types
for _typecode in _array_unsigned_int_typecode_ctype_mappings.keys():
# The JVM does not have unsigned types, so use signed types that are at least 1
# bit larger to store them
size = ctypes.sizeof(_array_unsigned_int_typecode_ctype_mappings[_typecode]) * 8 + 1
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
# Type code 'u' in Python's array is deprecated since version 3.3, and will be
# removed in version 4.0. See: https://docs.python.org/3/library/array.html
if sys.version_info[0] < 4:
_array_type_mappings['u'] = StringType
# Type code 'c' are only available at python 2
if sys.version_info[0] < 3:
_array_type_mappings['c'] = StringType
# SPARK-21465:
# In python2, array of 'L' happened to be mistakenly partially supported. To
# avoid breaking users' code, we should keep this partial support. Below is a
# dirty hack to keep this partial support and make the unit tests pass
import platform
if sys.version_info[0] < 3 and platform.python_implementation() != 'PyPy':
if 'L' not in _array_type_mappings.keys():
_array_type_mappings['L'] = LongType
_array_unsigned_int_typecode_ctype_mappings['L'] = ctypes.c_uint
def _infer_type(obj):
"""Infer the DataType from obj
"""
if obj is None:
return NullType()
if hasattr(obj, '__UDT__'):
return obj.__UDT__
dataType = _type_mappings.get(type(obj))
if dataType is DecimalType:
# the precision and scale of `obj` may be different from row to row.
return DecimalType(38, 18)
elif dataType is not None:
return dataType()
if isinstance(obj, dict):
for key, value in obj.items():
if key is not None and value is not None:
return MapType(_infer_type(key), _infer_type(value), True)
return MapType(NullType(), NullType(), True)
elif isinstance(obj, list):
for v in obj:
if v is not None:
return ArrayType(_infer_type(obj[0]), True)
return ArrayType(NullType(), True)
elif isinstance(obj, array):
if obj.typecode in _array_type_mappings:
return ArrayType(_array_type_mappings[obj.typecode](), False)
else:
raise TypeError("not supported type: array(%s)" % obj.typecode)
else:
try:
return _infer_schema(obj)
except TypeError:
raise TypeError("not supported type: %s" % type(obj))
def _infer_schema(row, names=None):
"""Infer the schema from dict/namedtuple/object"""
if isinstance(row, dict):
items = sorted(row.items())
elif isinstance(row, (tuple, list)):
if hasattr(row, "__fields__"): # Row
items = zip(row.__fields__, tuple(row))
elif hasattr(row, "_fields"): # namedtuple
items = zip(row._fields, tuple(row))
else:
if names is None:
names = ['_%d' % i for i in range(1, len(row) + 1)]
elif len(names) < len(row):
names.extend('_%d' % i for i in range(len(names) + 1, len(row) + 1))
items = zip(names, row)
elif hasattr(row, "__dict__"): # object
items = sorted(row.__dict__.items())
else:
raise TypeError("Can not infer schema for type: %s" % type(row))
fields = [StructField(k, _infer_type(v), True) for k, v in items]
return StructType(fields)
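# Illustrative example (not a doctest): inferring a schema from a plain dict sorts
# the keys and maps Python types through _type_mappings, e.g.
#   _infer_schema({'a': 1, 'b': 'x'})
# should yield StructType(List(StructField(a,LongType,true),StructField(b,StringType,true)))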
def _has_nulltype(dt):
""" Return whether there is NullType in `dt` or not """
if isinstance(dt, StructType):
return any(_has_nulltype(f.dataType) for f in dt.fields)
elif isinstance(dt, ArrayType):
return _has_nulltype((dt.elementType))
elif isinstance(dt, MapType):
return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType)
else:
return isinstance(dt, NullType)
def _merge_type(a, b, name=None):
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
if isinstance(a, NullType):
return b
elif isinstance(b, NullType):
return a
elif type(a) is not type(b):
# TODO: type cast (such as int -> long)
raise TypeError(new_msg("Can not merge type %s and %s" % (type(a), type(b))))
# same type
if isinstance(a, StructType):
nfs = dict((f.name, f.dataType) for f in b.fields)
fields = [StructField(f.name, _merge_type(f.dataType, nfs.get(f.name, NullType()),
name=new_name(f.name)))
for f in a.fields]
names = set([f.name for f in fields])
for n in nfs:
if n not in names:
fields.append(StructField(n, nfs[n]))
return StructType(fields)
elif isinstance(a, ArrayType):
return ArrayType(_merge_type(a.elementType, b.elementType,
name='element in array %s' % name), True)
elif isinstance(a, MapType):
return MapType(_merge_type(a.keyType, b.keyType, name='key of map %s' % name),
_merge_type(a.valueType, b.valueType, name='value of map %s' % name),
True)
else:
return a
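# Illustrative examples of the merge rules above: _merge_type(LongType(), NullType())
# should return LongType(); merging two StructTypes unions their fields (fields missing
# on one side are treated as NullType); merging two different concrete types such as
# LongType() and DoubleType() should raise a TypeError.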
def _need_converter(dataType):
if isinstance(dataType, StructType):
return True
elif isinstance(dataType, ArrayType):
return _need_converter(dataType.elementType)
elif isinstance(dataType, MapType):
return _need_converter(dataType.keyType) or _need_converter(dataType.valueType)
elif isinstance(dataType, NullType):
return True
else:
return False
def _create_converter(dataType):
"""Create a converter to drop the names of fields in obj """
if not _need_converter(dataType):
return lambda x: x
if isinstance(dataType, ArrayType):
conv = _create_converter(dataType.elementType)
return lambda row: [conv(v) for v in row]
elif isinstance(dataType, MapType):
kconv = _create_converter(dataType.keyType)
vconv = _create_converter(dataType.valueType)
return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items())
elif isinstance(dataType, NullType):
return lambda x: None
elif not isinstance(dataType, StructType):
return lambda x: x
# dataType must be StructType
names = [f.name for f in dataType.fields]
converters = [_create_converter(f.dataType) for f in dataType.fields]
convert_fields = any(_need_converter(f.dataType) for f in dataType.fields)
def convert_struct(obj):
if obj is None:
return
if isinstance(obj, (tuple, list)):
if convert_fields:
return tuple(conv(v) for v, conv in zip(obj, converters))
else:
return tuple(obj)
if isinstance(obj, dict):
d = obj
elif hasattr(obj, "__dict__"): # object
d = obj.__dict__
else:
raise TypeError("Unexpected obj type: %s" % type(obj))
if convert_fields:
return tuple([conv(d.get(name)) for name, conv in zip(names, converters)])
else:
return tuple([d.get(name) for name in names])
return convert_struct
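# Illustrative example: with schema = StructType([StructField("a", LongType()),
# StructField("b", StringType())]), _create_converter(schema)({'b': 'x', 'a': 1})
# should return (1, 'x') -- field names are dropped and values are ordered by the schema.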
_acceptable_types = {
BooleanType: (bool,),
ByteType: (int, long),
ShortType: (int, long),
IntegerType: (int, long),
LongType: (int, long),
FloatType: (float,),
DoubleType: (float,),
DecimalType: (decimal.Decimal,),
StringType: (str, unicode),
BinaryType: (bytearray,),
DateType: (datetime.date, datetime.datetime),
TimestampType: (datetime.datetime,),
ArrayType: (list, tuple, array),
MapType: (dict,),
StructType: (tuple, list, dict),
}
def _make_type_verifier(dataType, nullable=True, name=None):
"""
Make a verifier that checks the type of obj against dataType and raises a TypeError if they do
not match.
This verifier also checks the value of obj against datatype and raises a ValueError if it's not
within the allowed range, e.g. using 128 as ByteType will overflow. Note that, Python float is
not checked, so it will become infinity when cast to Java float if it overflows.
>>> _make_type_verifier(StructType([]))(None)
>>> _make_type_verifier(StringType())("")
>>> _make_type_verifier(LongType())(0)
>>> _make_type_verifier(ArrayType(ShortType()))(list(range(3)))
>>> _make_type_verifier(ArrayType(StringType()))(set()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError:...
>>> _make_type_verifier(MapType(StringType(), IntegerType()))({})
>>> _make_type_verifier(StructType([]))(())
>>> _make_type_verifier(StructType([]))([])
>>> _make_type_verifier(StructType([]))([1]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> # Check if numeric values are within the allowed range.
>>> _make_type_verifier(ByteType())(12)
>>> _make_type_verifier(ByteType())(1234) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(ByteType(), False)(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(
... ArrayType(ShortType(), False))([1, None]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(MapType(StringType(), IntegerType()))({None: 1})
Traceback (most recent call last):
...
ValueError:...
>>> schema = StructType().add("a", IntegerType()).add("b", StringType(), False)
>>> _make_type_verifier(schema)((1, None)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
def verify_nullability(obj):
if obj is None:
if nullable:
return True
else:
raise ValueError(new_msg("This field is not nullable, but got None"))
else:
return False
_type = type(dataType)
def assert_acceptable_types(obj):
assert _type in _acceptable_types, \
new_msg("unknown datatype: %s for object %r" % (dataType, obj))
def verify_acceptable_types(obj):
# subclass of them can not be fromInternal in JVM
if type(obj) not in _acceptable_types[_type]:
raise TypeError(new_msg("%s can not accept object %r in type %s"
% (dataType, obj, type(obj))))
if isinstance(dataType, StringType):
# StringType can work with any types
verify_value = lambda _: _
elif isinstance(dataType, UserDefinedType):
verifier = _make_type_verifier(dataType.sqlType(), name=name)
def verify_udf(obj):
if not (hasattr(obj, '__UDT__') and obj.__UDT__ == dataType):
raise ValueError(new_msg("%r is not an instance of type %r" % (obj, dataType)))
verifier(dataType.toInternal(obj))
verify_value = verify_udf
elif isinstance(dataType, ByteType):
def verify_byte(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -128 or obj > 127:
raise ValueError(new_msg("object of ByteType out of range, got: %s" % obj))
verify_value = verify_byte
elif isinstance(dataType, ShortType):
def verify_short(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -32768 or obj > 32767:
raise ValueError(new_msg("object of ShortType out of range, got: %s" % obj))
verify_value = verify_short
elif isinstance(dataType, IntegerType):
def verify_integer(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -2147483648 or obj > 2147483647:
raise ValueError(
new_msg("object of IntegerType out of range, got: %s" % obj))
verify_value = verify_integer
elif isinstance(dataType, ArrayType):
element_verifier = _make_type_verifier(
dataType.elementType, dataType.containsNull, name="element in array %s" % name)
def verify_array(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
for i in obj:
element_verifier(i)
verify_value = verify_array
elif isinstance(dataType, MapType):
key_verifier = _make_type_verifier(dataType.keyType, False, name="key of map %s" % name)
value_verifier = _make_type_verifier(
dataType.valueType, dataType.valueContainsNull, name="value of map %s" % name)
def verify_map(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
for k, v in obj.items():
key_verifier(k)
value_verifier(v)
verify_value = verify_map
elif isinstance(dataType, StructType):
verifiers = []
for f in dataType.fields:
verifier = _make_type_verifier(f.dataType, f.nullable, name=new_name(f.name))
verifiers.append((f.name, verifier))
def verify_struct(obj):
assert_acceptable_types(obj)
if isinstance(obj, dict):
for f, verifier in verifiers:
verifier(obj.get(f))
elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
# the order in obj could be different than dataType.fields
for f, verifier in verifiers:
verifier(obj[f])
elif isinstance(obj, (tuple, list)):
if len(obj) != len(verifiers):
raise ValueError(
new_msg("Length of object (%d) does not match with "
"length of fields (%d)" % (len(obj), len(verifiers))))
for v, (_, verifier) in zip(obj, verifiers):
verifier(v)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
for f, verifier in verifiers:
verifier(d.get(f))
else:
raise TypeError(new_msg("StructType can not accept object %r in type %s"
% (obj, type(obj))))
verify_value = verify_struct
else:
def verify_default(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
verify_value = verify_default
def verify(obj):
if not verify_nullability(obj):
verify_value(obj)
return verify
# This is used to unpickle a Row from JVM
def _create_row_inbound_converter(dataType):
return lambda *a: dataType.fromInternal(a)
def _create_row(fields, values):
row = Row(*values)
row.__fields__ = fields
return row
class Row(tuple):
"""
A row in L{DataFrame}.
The fields in it can be accessed:
* like attributes (``row.key``)
* like dictionary values (``row[key]``)
``key in row`` will search through row keys.
Row can be used to create a row object by using named arguments,
the fields will be sorted by names. It is not allowed to omit
a named argument to represent that the value is None or missing; in that case
the value should be set to None explicitly.
>>> row = Row(name="Alice", age=11)
>>> row
Row(age=11, name='Alice')
>>> row['name'], row['age']
('Alice', 11)
>>> row.name, row.age
('Alice', 11)
>>> 'name' in row
True
>>> 'wrong_key' in row
False
Row can also be used to create another Row-like class, which can then
be used to create Row objects, such as
>>> Person = Row("name", "age")
>>> Person
<Row(name, age)>
>>> 'name' in Person
True
>>> 'wrong_key' in Person
False
>>> Person("Alice", 11)
Row(name='Alice', age=11)
"""
def __new__(self, *args, **kwargs):
if args and kwargs:
raise ValueError("Can not use both args "
"and kwargs to create Row")
if kwargs:
# create row objects
names = sorted(kwargs.keys())
row = tuple.__new__(self, [kwargs[n] for n in names])
row.__fields__ = names
row.__from_dict__ = True
return row
else:
# create row class or objects
return tuple.__new__(self, args)
def asDict(self, recursive=False):
"""
Return as a dict
:param recursive: turns nested Rows into dicts (default: False).
>>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
True
>>> row = Row(key=1, value=Row(name='a', age=2))
>>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
True
>>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
True
"""
if not hasattr(self, "__fields__"):
raise TypeError("Cannot convert a Row class into dict")
if recursive:
def conv(obj):
if isinstance(obj, Row):
return obj.asDict(True)
elif isinstance(obj, list):
return [conv(o) for o in obj]
elif isinstance(obj, dict):
return dict((k, conv(v)) for k, v in obj.items())
else:
return obj
return dict(zip(self.__fields__, (conv(o) for o in self)))
else:
return dict(zip(self.__fields__, self))
def __contains__(self, item):
if hasattr(self, "__fields__"):
return item in self.__fields__
else:
return super(Row, self).__contains__(item)
# let the object act like a class
def __call__(self, *args):
"""create new Row object"""
if len(args) > len(self):
raise ValueError("Can not create Row with fields %s, expected %d values "
"but got %s" % (self, len(self), args))
return _create_row(self, args)
def __getitem__(self, item):
if isinstance(item, (int, slice)):
return super(Row, self).__getitem__(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self.__fields__.index(item)
return super(Row, self).__getitem__(idx)
except IndexError:
raise KeyError(item)
except ValueError:
raise ValueError(item)
def __getattr__(self, item):
if item.startswith("__"):
raise AttributeError(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self.__fields__.index(item)
return self[idx]
except IndexError:
raise AttributeError(item)
except ValueError:
raise AttributeError(item)
def __setattr__(self, key, value):
if key != '__fields__' and key != "__from_dict__":
raise Exception("Row is read-only")
self.__dict__[key] = value
def __reduce__(self):
"""Returns a tuple so Python knows how to pickle Row."""
if hasattr(self, "__fields__"):
return (_create_row, (self.__fields__, tuple(self)))
else:
return tuple.__reduce__(self)
def __repr__(self):
"""Printable representation of Row used in Python REPL."""
if hasattr(self, "__fields__"):
return "Row(%s)" % ", ".join("%s=%r" % (k, v)
for k, v in zip(self.__fields__, tuple(self)))
else:
return "<Row(%s)>" % ", ".join(self)
class DateConverter(object):
def can_convert(self, obj):
return isinstance(obj, datetime.date)
def convert(self, obj, gateway_client):
Date = JavaClass("java.sql.Date", gateway_client)
return Date.valueOf(obj.strftime("%Y-%m-%d"))
class DatetimeConverter(object):
def can_convert(self, obj):
return isinstance(obj, datetime.datetime)
def convert(self, obj, gateway_client):
Timestamp = JavaClass("java.sql.Timestamp", gateway_client)
seconds = (calendar.timegm(obj.utctimetuple()) if obj.tzinfo
else time.mktime(obj.timetuple()))
t = Timestamp(int(seconds) * 1000)
t.setNanos(obj.microsecond * 1000)
return t
# datetime is a subclass of date, we should register DatetimeConverter first
register_input_converter(DatetimeConverter())
register_input_converter(DateConverter())
def to_arrow_type(dt):
""" Convert Spark data type to pyarrow type
"""
from distutils.version import LooseVersion
import pyarrow as pa
if type(dt) == BooleanType:
arrow_type = pa.bool_()
elif type(dt) == ByteType:
arrow_type = pa.int8()
elif type(dt) == ShortType:
arrow_type = pa.int16()
elif type(dt) == IntegerType:
arrow_type = pa.int32()
elif type(dt) == LongType:
arrow_type = pa.int64()
elif type(dt) == FloatType:
arrow_type = pa.float32()
elif type(dt) == DoubleType:
arrow_type = pa.float64()
elif type(dt) == DecimalType:
arrow_type = pa.decimal128(dt.precision, dt.scale)
elif type(dt) == StringType:
arrow_type = pa.string()
elif type(dt) == BinaryType:
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt) +
"\nPlease install pyarrow >= 0.10.0 for BinaryType support.")
arrow_type = pa.binary()
elif type(dt) == DateType:
arrow_type = pa.date32()
elif type(dt) == TimestampType:
# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read
arrow_type = pa.timestamp('us', tz='UTC')
elif type(dt) == ArrayType:
if type(dt.elementType) == TimestampType:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
arrow_type = pa.list_(to_arrow_type(dt.elementType))
else:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
return arrow_type
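# Usage sketch (illustrative, assumes pyarrow is installed): each supported
# Spark type maps to exactly one Arrow type, e.g.
#
#   to_arrow_type(LongType())              # -> pa.int64()
#   to_arrow_type(ArrayType(StringType())) # -> pa.list_(pa.string())
#   to_arrow_type(TimestampType())         # -> pa.timestamp('us', tz='UTC')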
def to_arrow_schema(schema):
""" Convert a schema from Spark to Arrow
"""
import pyarrow as pa
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in schema]
return pa.schema(fields)
def from_arrow_type(at):
""" Convert pyarrow type to Spark data type.
"""
from distutils.version import LooseVersion
import pyarrow as pa
import pyarrow.types as types
if types.is_boolean(at):
spark_type = BooleanType()
elif types.is_int8(at):
spark_type = ByteType()
elif types.is_int16(at):
spark_type = ShortType()
elif types.is_int32(at):
spark_type = IntegerType()
elif types.is_int64(at):
spark_type = LongType()
elif types.is_float32(at):
spark_type = FloatType()
elif types.is_float64(at):
spark_type = DoubleType()
elif types.is_decimal(at):
spark_type = DecimalType(precision=at.precision, scale=at.scale)
elif types.is_string(at):
spark_type = StringType()
elif types.is_binary(at):
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at) +
"\nPlease install pyarrow >= 0.10.0 for BinaryType support.")
spark_type = BinaryType()
elif types.is_date32(at):
spark_type = DateType()
elif types.is_timestamp(at):
spark_type = TimestampType()
elif types.is_list(at):
if types.is_timestamp(at.value_type):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
spark_type = ArrayType(from_arrow_type(at.value_type))
else:
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
return spark_type
def from_arrow_schema(arrow_schema):
""" Convert schema from Arrow to Spark.
"""
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in arrow_schema])
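# Round-trip sketch (illustrative, assumes pyarrow is installed): converting a
# Spark schema to Arrow and back preserves field names, types and nullability
# for the types supported above.
#
#   spark_schema = StructType([StructField("id", LongType(), False),
#                              StructField("name", StringType(), True)])
#   assert from_arrow_schema(to_arrow_schema(spark_schema)) == spark_schema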
def _arrow_column_to_pandas(column, data_type):
""" Convert Arrow Column to pandas Series.
:param column: pyarrow.lib.Column
:param data_type: a Spark data type for the column
"""
import pandas as pd
import pyarrow as pa
from distutils.version import LooseVersion
# If the given column is a date type column, creates a series of datetime.date directly instead
# of creating datetime64[ns] as intermediate data to avoid overflow caused by datetime64[ns]
# type handling.
if LooseVersion(pa.__version__) < LooseVersion("0.11.0"):
if type(data_type) == DateType:
return pd.Series(column.to_pylist(), name=column.name)
else:
return column.to_pandas()
else:
# Since Arrow 0.11.0, support date_as_object to return datetime.date instead of
# np.datetime64.
return column.to_pandas(date_as_object=True)
def _arrow_table_to_pandas(table, schema):
""" Convert Arrow Table to pandas DataFrame.
Pandas DataFrame created from PyArrow uses datetime64[ns] for date type values, but we should
use datetime.date to match the behavior with when Arrow optimization is disabled.
:param table: pyarrow.lib.Table
:param schema: a Spark schema of the pyarrow.lib.Table
"""
import pandas as pd
import pyarrow as pa
from distutils.version import LooseVersion
# If the given table contains a date type column, use `_arrow_column_to_pandas` for pyarrow<0.11
# or use `date_as_object` option for pyarrow>=0.11 to avoid creating datetime64[ns] as
# intermediate data.
if LooseVersion(pa.__version__) < LooseVersion("0.11.0"):
if any(type(field.dataType) == DateType for field in schema):
return pd.concat([_arrow_column_to_pandas(column, field.dataType)
for column, field in zip(table.itercolumns(), schema)], axis=1)
else:
return table.to_pandas()
else:
return table.to_pandas(date_as_object=True)
def _get_local_timezone():
""" Get local timezone using pytz with environment variable, or dateutil.
If there is a 'TZ' environment variable, pass it to pandas to use pytz and use it as timezone
string, otherwise use the special word 'dateutil/:' which means that pandas uses dateutil and
it reads system configuration to know the system local timezone.
See also:
- https://github.com/pandas-dev/pandas/blob/0.19.x/pandas/tslib.pyx#L1753
- https://github.com/dateutil/dateutil/blob/2.6.1/dateutil/tz/tz.py#L1338
"""
import os
return os.environ.get('TZ', 'dateutil/:')
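# For example, with the TZ environment variable unset this returns the special
# string 'dateutil/:'; with TZ=America/New_York it returns 'America/New_York'.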
def _check_series_localize_timestamps(s, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
:param s: pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that has been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64tz_dtype
tz = timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(tz).dt.tz_localize(None)
else:
return s
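# Usage sketch (illustrative): a tz-aware series becomes tz-naive in the
# requested zone; non-timestamp series pass through unchanged.
#
#   import pandas as pd
#   s = pd.Series(pd.date_range('2018-03-01', periods=2, freq='H', tz='UTC'))
#   _check_series_localize_timestamps(s, 'America/New_York')
#   # -> tz-naive values in New York local time
#   #    (2018-02-28 19:00:00 and 2018-02-28 20:00:00)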
def _check_dataframe_localize_timestamps(pdf, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone
:param pdf: pandas.DataFrame
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.DataFrame where any timezone aware columns have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
for column, series in pdf.iteritems():
pdf[column] = _check_series_localize_timestamps(series, timezone)
return pdf
def _check_series_convert_timestamps_internal(s, timezone):
"""
Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
Spark internal storage
:param s: a pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that, if it is a timestamp series, has been UTC normalized without a time zone
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64_dtype(s.dtype):
# When tz_localize a tz-naive timestamp, the result is ambiguous if the tz-naive
# timestamp is during the hour when the clock is adjusted backward due to
# daylight saving time (dst).
# E.g., for America/New_York, the clock is adjusted backward on 2015-11-01 2:00 to
# 2015-11-01 1:00 from dst-time to standard time, and therefore, when tz_localize
# a tz-naive timestamp 2015-11-01 1:30 with America/New_York timezone, it can be either
# dst time (2015-11-01 1:30-0400) or standard time (2015-11-01 1:30-0500).
#
# Here we explicitly choose to use standard time. This matches the default behavior of
# pytz.
#
# Here is some code to help understand this behavior:
# >>> import datetime
# >>> import pandas as pd
# >>> import pytz
# >>>
# >>> t = datetime.datetime(2015, 11, 1, 1, 30)
# >>> ts = pd.Series([t])
# >>> tz = pytz.timezone('America/New_York')
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=True)
# 0 2015-11-01 01:30:00-04:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=False)
# 0 2015-11-01 01:30:00-05:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> str(tz.localize(t))
# '2015-11-01 01:30:00-05:00'
tz = timezone or _get_local_timezone()
return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC')
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert('UTC')
else:
return s
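# Usage sketch (illustrative): a tz-naive series is localized to the given zone
# and then normalized to UTC; the ambiguous DST hour resolves to standard time.
#
#   import pandas as pd
#   s = pd.Series([pd.Timestamp('2015-11-01 01:30:00')])
#   _check_series_convert_timestamps_internal(s, 'America/New_York')
#   # -> 2015-11-01 06:30:00+00:00  (01:30 interpreted as EST, UTC-5)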
def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series that, if it is a timestamp series, has been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
from_tz = from_timezone or _get_local_timezone()
to_tz = to_timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(to_tz).dt.tz_localize(None)
elif is_datetime64_dtype(s.dtype) and from_tz != to_tz:
# `s.dt.tz_localize('tzlocal()')` doesn't work properly when including NaT.
return s.apply(
lambda ts: ts.tz_localize(from_tz, ambiguous=False).tz_convert(to_tz).tz_localize(None)
if ts is not pd.NaT else pd.NaT)
else:
return s
def _check_series_convert_timestamps_local_tz(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series that, if it is a timestamp series, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, None, timezone)
def _check_series_convert_timestamps_tz_local(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param timezone: the timezone to convert from. if None then use local timezone
:return pandas.Series that, if it is a timestamp series, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, timezone, None)
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
globs = globals()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession.builder.getOrCreate()
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
etkirsch/scikit-learn
|
sklearn/datasets/lfw.py
|
141
|
19372
|
"""Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % archive_path)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with at least
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
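# Usage sketch (illustrative; the first call downloads ~200MB of image data):
#
#   lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
#   lfw_people.images.shape   # (1288, 50, 37) with these settings
#   lfw_people.target_names   # the 7 people with at least 70 pictures each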
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19. "
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure how to implement the
"Unrestricted" variant correctly, it is left unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
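# Usage sketch (illustrative):
#
#   lfw_pairs_train = fetch_lfw_pairs(subset='train')
#   lfw_pairs_train.pairs.shape   # (2200, 2, 62, 47) with the default slice/resize
#   lfw_pairs_train.target[:3]    # 1 = same person, 0 = different persons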
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19. "
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
|
bsd-3-clause
|
cfjhallgren/shogun
|
applications/tapkee/faces_embedding.py
|
5
|
2075
|
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (W) 2011 Sergey Lisitsyn
# Copyright (C) 2011 Sergey Lisitsyn
from shogun import *
from numpy import *
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
import re,os,time
from pylab import *
def build_features(path):
files = os.listdir(path)
files.remove('README')
N = len(files)
(nd,md) = imread(os.path.join(path,files[0])).shape
dim = nd*md
feature_matrix = zeros([dim,N])
for i,filename in enumerate(files):
feature_matrix[:,i] = imread(os.path.join(path,filename)).ravel()
return nd,md,RealFeatures(feature_matrix)
path = '../../data/faces/'
converter = DiffusionMaps
nd,md,features = build_features(path)
converter_instance = converter()
converter_instance.set_t(5)
converter_instance.set_target_dim(2)
start = time.time()
new_features = converter_instance.embed(features).get_feature_matrix()
print new_features.shape
end = time.time()
clusterer = KMeans
clusterer_instance = clusterer(2,EuclideanDistance())
clusterer_instance.train(features)
labels = clusterer_instance.apply().get_labels()
print labels
print 'applied %s, took %fs' % (converter_instance.get_name(), end-start)
print 'plotting'
fig = figure()
ax = fig.add_subplot(111,axisbg='#ffffff')
ax.scatter(new_features[0],new_features[1],color='black')
import random
for i in range(len(new_features[0])):
feature_vector = features.get_feature_vector(i)
Z = zeros([nd,md,4])
Z[:,:,0] = 255-feature_vector.reshape(nd,md)[::-1,:]
Z[:,:,1] = Z[:,:,0]
Z[:,:,2] = Z[:,:,0]
for k in range(nd):
for j in range(md):
Z[k,j,3] = pow(sin(k*pi/nd)*sin(j*pi/md),0.5)
imagebox = OffsetImage(Z,cmap=cm.gray,zoom=0.25)
ab = AnnotationBbox(imagebox, (new_features[0,i],new_features[1,i]),
pad=0.001,frameon=False)
ax.add_artist(ab)
axis('off')
savefig('faces.png')
show()
|
gpl-3.0
|
robertmattmueller/sdac-compiler
|
sympy/interactive/printing.py
|
31
|
15830
|
"""Tools for setting up printing in interactive sessions. """
from __future__ import print_function, division
import sys
from distutils.version import LooseVersion as V
from io import BytesIO
from sympy import latex as default_latex
from sympy import preview
from sympy.core.compatibility import integer_types
from sympy.utilities.misc import debug
def _init_python_printing(stringify_func):
"""Setup printing in Python interactive session. """
import sys
from sympy.core.compatibility import builtins
def _displayhook(arg):
"""Python's pretty-printer display hook.
This function was adapted from:
http://www.python.org/dev/peps/pep-0217/
"""
if arg is not None:
builtins._ = None
print(stringify_func(arg))
builtins._ = arg
sys.displayhook = _displayhook
def _init_ipython_printing(ip, stringify_func, use_latex, euler, forecolor,
backcolor, fontsize, latex_mode, print_builtin,
latex_printer):
"""Setup printing in IPython interactive session. """
try:
from IPython.lib.latextools import latex_to_png
except ImportError:
pass
preamble = "\\documentclass[%s]{article}\n" \
"\\pagestyle{empty}\n" \
"\\usepackage{amsmath,amsfonts}%s\\begin{document}"
if euler:
addpackages = '\\usepackage{euler}'
else:
addpackages = ''
preamble = preamble % (fontsize, addpackages)
imagesize = 'tight'
offset = "0cm,0cm"
resolution = 150
dvi = r"-T %s -D %d -bg %s -fg %s -O %s" % (
imagesize, resolution, backcolor, forecolor, offset)
dvioptions = dvi.split()
debug("init_printing: DVIOPTIONS:", dvioptions)
debug("init_printing: PREAMBLE:", preamble)
latex = latex_printer or default_latex
def _print_plain(arg, p, cycle):
"""caller for pretty, for use in IPython 0.11"""
if _can_print_latex(arg):
p.text(stringify_func(arg))
else:
p.text(IPython.lib.pretty.pretty(arg))
def _preview_wrapper(o):
exprbuffer = BytesIO()
try:
preview(o, output='png', viewer='BytesIO',
outputbuffer=exprbuffer, preamble=preamble,
dvioptions=dvioptions)
except Exception as e:
# IPython swallows exceptions
debug("png printing:", "_preview_wrapper exception raised:",
repr(e))
raise
return exprbuffer.getvalue()
def _matplotlib_wrapper(o):
# mathtext does not understand certain latex flags, so we try to
# replace them with suitable subs
o = o.replace(r'\operatorname', '')
o = o.replace(r'\overline', r'\bar')
return latex_to_png(o)
def _can_print_latex(o):
"""Return True if type o can be printed with LaTeX.
If o is a container type, this is True if and only if every element of
o can be printed with LaTeX.
"""
from sympy import Basic
from sympy.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
if isinstance(o, (list, tuple, set, frozenset)):
return all(_can_print_latex(i) for i in o)
elif isinstance(o, dict):
return all(_can_print_latex(i) and _can_print_latex(o[i]) for i in o)
elif isinstance(o, bool):
return False
# TODO : Investigate if "elif hasattr(o, '_latex')" is more useful
# to use here, than these explicit imports.
elif isinstance(o, (Basic, MatrixBase, Vector, Dyadic)):
return True
elif isinstance(o, (float, integer_types)) and print_builtin:
return True
return False
def _print_latex_png(o):
"""
A function that returns a png rendered by an external latex
distribution, falling back to matplotlib rendering
"""
if _can_print_latex(o):
s = latex(o, mode=latex_mode)
try:
return _preview_wrapper(s)
except RuntimeError:
if latex_mode != 'inline':
s = latex(o, mode='inline')
return _matplotlib_wrapper(s)
def _print_latex_matplotlib(o):
"""
A function that returns a png rendered by mathtext
"""
if _can_print_latex(o):
s = latex(o, mode='inline')
try:
return _matplotlib_wrapper(s)
except Exception:
# Matplotlib.mathtext cannot render some things (like
# matrices)
return None
def _print_latex_text(o):
"""
A function to generate the latex representation of sympy expressions.
"""
if _can_print_latex(o):
s = latex(o, mode='plain')
s = s.replace(r'\dag', r'\dagger')
s = s.strip('$')
return '$$%s$$' % s
def _result_display(self, arg):
"""IPython's pretty-printer display hook, for use in IPython 0.10
This function was adapted from:
ipython/IPython/hooks.py:155
"""
if self.rc.pprint:
out = stringify_func(arg)
if '\n' in out:
print()
print(out)
else:
print(repr(arg))
import IPython
if V(IPython.__version__) >= '0.11':
from sympy.core.basic import Basic
from sympy.matrices.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
printable_types = [Basic, MatrixBase, float, tuple, list, set,
frozenset, dict, Vector, Dyadic] + list(integer_types)
plaintext_formatter = ip.display_formatter.formatters['text/plain']
for cls in printable_types:
plaintext_formatter.for_type(cls, _print_plain)
png_formatter = ip.display_formatter.formatters['image/png']
if use_latex in (True, 'png'):
debug("init_printing: using png formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_png)
elif use_latex == 'matplotlib':
debug("init_printing: using matplotlib formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_matplotlib)
else:
debug("init_printing: not using any png formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#png_formatter.for_type(cls, None)
if cls in png_formatter.type_printers:
png_formatter.type_printers.pop(cls)
latex_formatter = ip.display_formatter.formatters['text/latex']
if use_latex in (True, 'mathjax'):
debug("init_printing: using mathjax formatter")
for cls in printable_types:
latex_formatter.for_type(cls, _print_latex_text)
else:
debug("init_printing: not using text/latex formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#latex_formatter.for_type(cls, None)
if cls in latex_formatter.type_printers:
latex_formatter.type_printers.pop(cls)
else:
ip.set_hook('result_display', _result_display)
def _is_ipython(shell):
"""Is a shell instance an IPython shell?"""
# shortcut, so we don't import IPython if we don't have to
if 'IPython' not in sys.modules:
return False
try:
from IPython.core.interactiveshell import InteractiveShell
except ImportError:
# IPython < 0.11
try:
from IPython.iplib import InteractiveShell
except ImportError:
# Reaching this point means IPython has changed in a backward-incompatible way
# that we don't know about. Warn?
return False
return isinstance(shell, InteractiveShell)
def init_printing(pretty_print=True, order=None, use_unicode=None,
use_latex=None, wrap_line=None, num_columns=None,
no_global=False, ip=None, euler=False, forecolor='Black',
backcolor='Transparent', fontsize='10pt',
latex_mode='equation*', print_builtin=True,
str_printer=None, pretty_printer=None,
latex_printer=None):
"""
Initializes pretty-printer depending on the environment.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify or the provided pretty
printer; if False, use sstrrepr to stringify or the provided string
printer.
order: string or None
There are a few different settings for this parameter:
lex (default), which is lexicographic order;
grlex, which is graded lexicographic order;
grevlex, which is reversed graded lexicographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: string, boolean, or None
If True, use default latex rendering in GUI interfaces (png and
mathjax);
if False, do not use latex rendering;
if 'png', enable latex rendering with an external latex compiler,
falling back to matplotlib if external compilation fails;
if 'matplotlib', enable latex rendering with matplotlib;
if 'mathjax', enable latex text generation, for example MathJax
rendering in IPython notebook or text rendering in LaTeX documents
wrap_line: boolean
If True, lines will wrap at the end; if False, they will not wrap
but continue as one line. This is only relevant if `pretty_print` is
True.
num_columns: int or None
If int, number of columns before wrapping is set to num_columns; if
None, number of columns before wrapping is set to terminal width.
This is only relevant if `pretty_print` is True.
no_global: boolean
If True, the settings become system wide;
if False, use just for this console/session.
ip: An interactive console
This can either be an instance of IPython,
or a class that derives from code.InteractiveConsole.
euler: boolean, optional, default=False
Loads the euler package in the LaTeX preamble for handwritten style
fonts (http://www.ctan.org/pkg/euler).
forecolor: string, optional, default='Black'
DVI setting for foreground color.
backcolor: string, optional, default='Transparent'
DVI setting for background color.
fontsize: string, optional, default='10pt'
A font size to pass to the LaTeX documentclass function in the
preamble.
latex_mode: string, optional, default='equation*'
The mode used in the LaTeX printer. Can be one of:
{'inline'|'plain'|'equation'|'equation*'}.
print_builtin: boolean, optional, default=True
If true then floats and integers will be printed. If false the
printer will only print SymPy types.
str_printer: function, optional, default=None
A custom string printer function. This should mimic
sympy.printing.sstrrepr().
pretty_printer: function, optional, default=None
A custom pretty printer. This should mimic sympy.printing.pretty().
latex_printer: function, optional, default=None
A custom LaTeX printer. This should mimic sympy.printing.latex().
Examples
========
>>> from sympy.interactive import init_printing
>>> from sympy import Symbol, sqrt
>>> from sympy.abc import x, y
>>> sqrt(5)
sqrt(5)
>>> init_printing(pretty_print=True) # doctest: +SKIP
>>> sqrt(5) # doctest: +SKIP
___
\/ 5
>>> theta = Symbol('theta') # doctest: +SKIP
>>> init_printing(use_unicode=True) # doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
>>> init_printing(use_unicode=False) # doctest: +SKIP
>>> theta # doctest: +SKIP
theta
>>> init_printing(order='lex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grlex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grevlex') # doctest: +SKIP
>>> str(y * x**2 + x * y**2) # doctest: +SKIP
x**2*y + x*y**2
>>> init_printing(order='old') # doctest: +SKIP
>>> str(x**2 + y**2 + x + y) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(num_columns=10) # doctest: +SKIP
>>> x**2 + x + y**2 + y # doctest: +SKIP
x + y +
x**2 + y**2
"""
import sys
from sympy.printing.printer import Printer
if pretty_print:
if pretty_printer is not None:
stringify_func = pretty_printer
else:
from sympy.printing import pretty as stringify_func
else:
if str_printer is not None:
stringify_func = str_printer
else:
from sympy.printing import sstrrepr as stringify_func
# Even if ip is not passed, double check that not in IPython shell
in_ipython = False
if ip is None:
try:
ip = get_ipython()
except NameError:
pass
else:
in_ipython = (ip is not None)
if ip and not in_ipython:
in_ipython = _is_ipython(ip)
if in_ipython and pretty_print:
try:
import IPython
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if V(IPython.__version__) >= '1.0':
from IPython.terminal.interactiveshell import TerminalInteractiveShell
else:
from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
from code import InteractiveConsole
except ImportError:
pass
else:
# This will be True if we are in the qtconsole or notebook
if not isinstance(ip, (InteractiveConsole, TerminalInteractiveShell)) \
and 'ipython-console' not in ''.join(sys.argv):
if use_unicode is None:
debug("init_printing: Setting use_unicode to True")
use_unicode = True
if use_latex is None:
debug("init_printing: Setting use_latex to True")
use_latex = True
if not no_global:
Printer.set_global_settings(order=order, use_unicode=use_unicode,
wrap_line=wrap_line, num_columns=num_columns)
else:
_stringify_func = stringify_func
if pretty_print:
stringify_func = lambda expr: \
_stringify_func(expr, order=order,
use_unicode=use_unicode,
wrap_line=wrap_line,
num_columns=num_columns)
else:
stringify_func = lambda expr: _stringify_func(expr, order=order)
if in_ipython:
_init_ipython_printing(ip, stringify_func, use_latex, euler,
forecolor, backcolor, fontsize, latex_mode,
print_builtin, latex_printer)
else:
_init_python_printing(stringify_func)
|
gpl-3.0
|
kmather73/ggplot
|
ggplot/tests/test_basic.py
|
12
|
9308
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import xrange
from nose.tools import assert_equal, assert_true, assert_raises
from . import get_assert_same_ggplot, cleanup
assert_same_ggplot = get_assert_same_ggplot(__file__)
from ggplot import *
from ggplot.exampledata import diamonds
import numpy as np
import pandas as pd
def _build_testing_df():
df = pd.DataFrame({
"x": np.arange(0, 100),
"y": np.arange(0, 100),
"z": np.arange(0, 100)
})
df['cat'] = np.where(df.x*2 > 50, 'blah', 'blue')
df['cat'] = np.where(df.y > 50, 'hello', df.cat)
df['cat2'] = np.where(df.y < 15, 'one', 'two')
df['y'] = np.sin(df.y)
df['z'] = df['y'] + 100
df['c'] = np.where(df.x%2==0,"red", "blue")
return df
def _build_meat_df():
meat['date'] = pd.to_datetime(meat.date)
return meat
@cleanup
def test_geom_density():
df = _build_testing_df()
gg = ggplot(aes(x="x", color="c"), data=df)
gg = gg + geom_density() + xlab("x label") + ylab("y label")
assert_same_ggplot(gg, "geom_density")
@cleanup
def test_geom_histogram():
df = _build_testing_df()
# TODO: use fill aesthetic for a better test
gg = ggplot(aes(x="x", y="y", shape="cat2", color="cat"), data=df)
assert_same_ggplot(gg + geom_histogram(), "geom_hist")
assert_same_ggplot(gg + geom_histogram() + ggtitle("My Histogram"), "geom_hist_title")
@cleanup
def test_geom_point():
df = _build_testing_df()
gg = ggplot(aes(x="x", y="y", shape="cat2", color="cat"), data=df)
assert_same_ggplot(gg + geom_point(), "geom_point")
gg = gg + geom_point() + geom_vline(xintercept=50, ymin=-1.5, ymax=1.5)
assert_same_ggplot(gg, "geom_point_vline")
@cleanup
def test_geom_area():
df = _build_testing_df()
gg = ggplot(aes(x='x', ymax='y', ymin='z', color="cat2"), data=df)
assert_same_ggplot(gg + geom_area(), "geom_area")
@cleanup
def test_geom_text():
gg = ggplot(aes(x='wt',y='mpg',label='name'),data=mtcars) + geom_text()
assert_same_ggplot(gg, "geom_text")
@cleanup
def test_geom_line():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_line(), "factor_geom_line")
@cleanup
def test_geom_rect():
df = pd.DataFrame({
'xmin':[3, 5, 3, 3, 9, 4, 8, 3, 9, 2, 9, 1, 11, 4, 7, 1],
'xmax':[10, 8, 10, 4, 10, 5, 9, 4, 10, 4, 11, 2, 12, 6, 9, 12],
'ymin':[3, 3, 6, 2, 2, 6, 6, 8, 8, 4, 4, 2, 2, 1, 1, 4],
'ymax':[5, 7, 7, 7, 7, 8, 8, 9, 9, 6, 6, 5, 5, 2, 2, 5]})
p = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax'))
p += geom_rect(xmin=0, xmax=13, ymin=0, ymax=10)
p += geom_rect(colour="white", fill="white")
p += xlim(0, 13)
assert_same_ggplot(p, "geom_rect_inv")
@cleanup
def test_factor_geom_point():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_point(), "factor_geom_point")
@cleanup
def test_factor_geom_point_line():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_line() + geom_point(), "factor_geom_point_line")
@cleanup
def test_factor_point_line_title_lab():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
p = p + geom_point() + geom_line(color='lightblue') + ggtitle("Beef: It's What's for Dinner")
p = p + xlab("Date") + ylab("Head of Cattle Slaughtered")
assert_same_ggplot(p, "factor_complicated")
@cleanup
def test_labs():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
p = p + geom_point() + geom_line(color='lightblue')
p = p + labs(title="Beef: It's What's for Dinner", x="Date", y="Head of Cattle Slaughtered")
assert_same_ggplot(p, "labs")
@cleanup
def test_factor_bar():
p = ggplot(aes(x='factor(cyl)'), data=mtcars)
assert_same_ggplot(p + geom_histogram(), "factor_geom_bar")
@cleanup
def test_stats_smooth():
df = _build_testing_df()
gg = ggplot(aes(x="x", y="y", color="cat"), data=df)
gg = gg + stat_smooth(se=False) + ggtitle("My Smoothed Chart")
assert_same_ggplot(gg, "stat_smooth")
@cleanup
def test_stats_bin2d():
import matplotlib.pyplot as plt
if not hasattr(plt, "hist2d"):
import nose
raise nose.SkipTest("stat_bin2d only works with newer matplotlib (1.3) versions.")
df = _build_testing_df()
gg = ggplot(aes(x='x', y='y', shape='cat', color='cat2'), data=df)
assert_same_ggplot(gg + stat_bin2d(), "stat_bin2d")
@cleanup
def test_alpha_density():
gg = ggplot(aes(x='mpg'), data=mtcars)
assert_same_ggplot(gg + geom_density(fill=True, alpha=0.3), "geom_density_alpha")
@cleanup
def test_facet_wrap():
df = _build_testing_df()
gg = ggplot(aes(x='x', ymax='y', ymin='z'), data=df)
#assert_same_ggplot(gg + geom_bar() + facet_wrap(x="cat2"), "geom_bar_facet")
assert_same_ggplot(gg + geom_area() + facet_wrap(x="cat2"), "geom_area_facet")
@cleanup
def test_facet_wrap2():
meat = _build_meat_df()
meat_lng = pd.melt(meat, id_vars=['date'])
p = ggplot(aes(x='date', y='value', colour='variable'), data=meat_lng)
assert_same_ggplot(p + geom_density(fill=True, alpha=0.3) + facet_wrap("variable"), "geom_density_facet")
assert_same_ggplot(p + geom_line(alpha=0.3) + facet_wrap("variable"), "geom_line_facet")
@cleanup
def test_facet_grid_exceptions():
meat = _build_meat_df()
meat_lng = pd.melt(meat, id_vars=['date'])
p = ggplot(aes(x="date", y="value", colour="variable", shape="variable"), meat_lng)
with assert_raises(Exception):
print(p + geom_point() + facet_grid(y="variable"))
with assert_raises(Exception):
print(p + geom_point() + facet_grid(y="variable", x="NOT_AVAILABLE"))
with assert_raises(Exception):
print(p + geom_point() + facet_grid(y="NOT_AVAILABLE", x="variable"))
@cleanup
def test_facet_grid():
# only use a small subset of the data to speedup tests
# N=53940 -> N=7916 and only 2x2 facets
_mask1 = (diamonds.cut == "Ideal") | (diamonds.cut == "Good")
_mask2 = (diamonds.clarity == "SI2") | (diamonds.clarity == "VS1")
_df = diamonds[_mask1 & _mask2]
p = ggplot(aes(x='x', y='y', colour='z'), data=_df)
p = p + geom_point() + scale_colour_gradient(low="white", high="red")
p = p + facet_grid("cut", "clarity")
assert_same_ggplot(p, "diamonds_big")
p = ggplot(aes(x='carat'), data=_df)
p = p + geom_density() + facet_grid("cut", "clarity")
assert_same_ggplot(p, "diamonds_facet")
@cleanup
def test_smooth_se():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
assert_same_ggplot(p + geom_point() + stat_smooth(), "point_smooth_se")
assert_same_ggplot(p + stat_smooth(), "smooth_se")
@cleanup
def test_scale_xy_continous():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
p = p + geom_point() + scale_x_continuous("This is the X")
p = p + scale_y_continuous("Squared", limits=[0, 1500])
assert_same_ggplot(p, "scale1")
@cleanup
def test_ylim():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
assert_same_ggplot(p + geom_point() + ylim(0, 1500), "ylim")
@cleanup
def test_partial_limits() :
p = ggplot(diamonds, aes('carat', 'price'))
assert_same_ggplot(p + geom_point(alpha=1/20.) + xlim(high = 4) + ylim(0), "partial_limits")
@cleanup
def test_partial_limits_facet() :
p = ggplot(diamonds, aes('carat', 'price', color="clarity"))
p = p + geom_point(alpha=1/20.) + facet_wrap(x="cut", scales="free") + xlim(low=0) + ylim(low=0)
assert_same_ggplot(p, "partial_limits_facet")
@cleanup
def test_scale_date():
meat = _build_meat_df()
gg = ggplot(aes(x='date', y='beef'), data=meat) + geom_line()
assert_same_ggplot(gg+scale_x_date(labels="%Y-%m-%d"), "scale_date")
@cleanup
def test_diamond():
p = ggplot(aes(x='x', y='y', colour='z'), data=diamonds.head(4))
p = p + geom_point() + scale_colour_gradient(low="white", high="red")
p = p + facet_wrap("cut")
assert_same_ggplot(p, "diamonds_small")
def test_aes_positional_args():
result = aes("weight", "hp")
expected = {"x": "weight", "y": "hp"}
assert_equal(result, expected)
result3 = aes("weight", "hp", "qsec")
expected3 = {"x": "weight", "y": "hp", "color": "qsec"}
assert_equal(result3, expected3)
def test_aes_keyword_args():
result = aes(x="weight", y="hp")
expected = {"x": "weight", "y": "hp"}
assert_equal(result, expected)
result3 = aes(x="weight", y="hp", color="qsec")
expected3 = {"x": "weight", "y": "hp", "color": "qsec"}
assert_equal(result3,expected3)
def test_aes_mixed_args():
result = aes("weight", "hp", color="qsec")
expected = {"x": "weight", "y": "hp", "color": "qsec"}
assert_equal(result, expected)
@cleanup
def test_scale_color_brewer() :
p = ggplot(diamonds, aes(x = "x", y="y"))
p = p + geom_line() + scale_color_brewer(type='qual', palette=2)
assert_same_ggplot(p, "scale_color_brewer")
|
bsd-2-clause
|
hainm/scikit-learn
|
examples/model_selection/randomized_search.py
|
201
|
3214
|
"""
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidate"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
|
bsd-3-clause
|
kambysese/mne-python
|
examples/forward/plot_forward_sensitivity_maps.py
|
14
|
4139
|
"""
.. _ex-sensitivity-maps:
================================================
Display sensitivity maps for EEG and MEG sensors
================================================
Sensitivity maps can be produced from forward operators that
indicate how well different sensor types will be able to detect
neural currents from different regions of the brain.
To get started with forward modeling see :ref:`tut-forward`.
"""
# Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.source_space import compute_distance_to_sensors
from mne.source_estimate import SourceEstimate
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path + '/subjects'
# Read the forward solutions with surface orientation
fwd = mne.read_forward_solution(fwd_fname)
mne.convert_forward_solution(fwd, surf_ori=True, copy=False)
leadfield = fwd['sol']['data']
print("Leadfield size : %d x %d" % leadfield.shape)
###############################################################################
# Compute sensitivity maps
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
###############################################################################
# Show gain matrix a.k.a. leadfield matrix with sensitivity map
picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
cmap='RdBu_r')
ax.set_title(ch_type.upper())
ax.set_xlabel('sources')
ax.set_ylabel('sensors')
fig.colorbar(im, ax=ax)
fig_2, ax = plt.subplots()
ax.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
color=['c', 'b', 'k'])
fig_2.legend()
ax.set(title='Normal orientation sensitivity',
xlabel='sensitivity', ylabel='count')
# sphinx_gallery_thumbnail_number = 3
brain_sens = grad_map.plot(
subjects_dir=subjects_dir, clim=dict(lims=[0, 50, 100]), figure=1)
brain_sens.add_text(0.1, 0.9, 'Gradiometer sensitivity', 'title', font_size=16)
###############################################################################
# Compare sensitivity map with distribution of source depths
# source space with vertices
src = fwd['src']
# Compute minimum Euclidean distances between vertices and MEG sensors
depths = compute_distance_to_sensors(src=src, info=fwd['info'],
picks=picks_meg).min(axis=1)
maxdep = depths.max() # for scaling
vertices = [src[0]['vertno'], src[1]['vertno']]
depths_map = SourceEstimate(data=depths, vertices=vertices, tmin=0.,
tstep=1.)
brain_dep = depths_map.plot(
subject='sample', subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[0, maxdep / 2., maxdep]), figure=2)
brain_dep.add_text(0.1, 0.9, 'Source depth (m)', 'title', font_size=16)
###############################################################################
# Sensitivity is likely to co-vary with the distance between sources and
# sensors. To determine the strength of this relationship, we can compute the
# correlation between source depth and sensitivity values.
corr = np.corrcoef(depths, grad_map.data[:, 0])[0, 1]
print('Correlation between source depth and gradiometer sensitivity values: %f.'
% corr)
###############################################################################
# Gradiometer sensitivity is highest close to the sensors, and decreases
# rapidly with increasing source depth. This is confirmed by the high negative
# correlation between the two.
|
bsd-3-clause
|
GuessWhoSamFoo/pandas
|
pandas/tests/extension/base/ops.py
|
2
|
5816
|
import operator
import pytest
import pandas as pd
from pandas.core import ops
from .base import BaseExtensionTests
class BaseOpsUtil(BaseExtensionTests):
def get_op_from_name(self, op_name):
short_opname = op_name.strip('_')
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
def check_opname(self, s, op_name, other, exc=Exception):
op = self.get_op_from_name(op_name)
self._check_op(s, op, other, op_name, exc)
def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
if exc is None:
result = op(s, other)
expected = s.combine(other, op)
self.assert_series_equal(result, expected)
else:
with pytest.raises(exc):
op(s, other)
def _check_divmod_op(self, s, op, other, exc=Exception):
        # divmod has multiple return values, so check separately
if exc is None:
result_div, result_mod = op(s, other)
if op is divmod:
expected_div, expected_mod = s // other, s % other
else:
expected_div, expected_mod = other // s, other % s
self.assert_series_equal(result_div, expected_div)
self.assert_series_equal(result_mod, expected_mod)
else:
with pytest.raises(exc):
divmod(s, other)
class BaseArithmeticOpsTests(BaseOpsUtil):
"""Various Series and DataFrame arithmetic ops methods.
Subclasses supporting various ops should set the class variables
to indicate that they support ops of that kind
* series_scalar_exc = TypeError
* frame_scalar_exc = TypeError
* series_array_exc = TypeError
* divmod_exc = TypeError
"""
series_scalar_exc = TypeError
frame_scalar_exc = TypeError
series_array_exc = TypeError
divmod_exc = TypeError
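    # A hypothetical subclass sketch (not part of pandas itself) showing how an
    # extension type that supports all arithmetic ops except divmod could
    # override these class variables:
    #
    #     class TestMyEAArithmeticOps(BaseArithmeticOpsTests):
    #         series_scalar_exc = None
    #         frame_scalar_exc = None
    #         series_array_exc = None
    #         divmod_exc = TypeError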
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# series & scalar
op_name = all_arithmetic_operators
s = pd.Series(data)
self.check_opname(s, op_name, s.iloc[0], exc=self.series_scalar_exc)
@pytest.mark.xfail(run=False, reason="_reduce needs implementation")
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op_name = all_arithmetic_operators
df = pd.DataFrame({'A': data})
self.check_opname(df, op_name, data[0], exc=self.frame_scalar_exc)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op_name = all_arithmetic_operators
s = pd.Series(data)
self.check_opname(s, op_name, pd.Series([s.iloc[0]] * len(s)),
exc=self.series_array_exc)
def test_divmod(self, data):
s = pd.Series(data)
self._check_divmod_op(s, divmod, 1, exc=self.divmod_exc)
self._check_divmod_op(1, ops.rdivmod, s, exc=self.divmod_exc)
def test_divmod_series_array(self, data):
s = pd.Series(data)
self._check_divmod_op(s, divmod, data)
def test_add_series_with_extension_array(self, data):
s = pd.Series(data)
result = s + data
expected = pd.Series(data + data)
self.assert_series_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op_name = all_arithmetic_operators
with pytest.raises(AttributeError):
getattr(data, op_name)
def test_direct_arith_with_series_returns_not_implemented(self, data):
# EAs should return NotImplemented for ops with Series.
# Pandas takes care of unboxing the series and calling the EA's op.
other = pd.Series(data)
if hasattr(data, '__add__'):
result = data.__add__(other)
assert result is NotImplemented
else:
raise pytest.skip(
"{} does not implement add".format(data.__class__.__name__)
)
class BaseComparisonOpsTests(BaseOpsUtil):
"""Various Series and DataFrame comparison ops methods."""
def _compare_other(self, s, data, op_name, other):
op = self.get_op_from_name(op_name)
if op_name == '__eq__':
assert getattr(data, op_name)(other) is NotImplemented
assert not op(s, other).all()
elif op_name == '__ne__':
assert getattr(data, op_name)(other) is NotImplemented
assert op(s, other).all()
else:
# array
assert getattr(data, op_name)(other) is NotImplemented
# series
s = pd.Series(data)
with pytest.raises(TypeError):
op(s, other)
def test_compare_scalar(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
self._compare_other(s, data, op_name, 0)
def test_compare_array(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
other = pd.Series([data[0]] * len(data))
self._compare_other(s, data, op_name, other)
def test_direct_arith_with_series_returns_not_implemented(self, data):
# EAs should return NotImplemented for ops with Series.
# Pandas takes care of unboxing the series and calling the EA's op.
other = pd.Series(data)
if hasattr(data, '__eq__'):
result = data.__eq__(other)
assert result is NotImplemented
else:
raise pytest.skip(
"{} does not implement __eq__".format(data.__class__.__name__)
)
|
bsd-3-clause
|
lekshmideepu/nest-simulator
|
pynest/examples/glif_cond_neuron.py
|
14
|
9655
|
# -*- coding: utf-8 -*-
#
# glif_cond_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Conductance-based generalized leaky integrate and fire (GLIF) neuron example
----------------------------------------------------------------------------
Simple example of how to use the ``glif_cond`` neuron model for
five different levels of GLIF neurons.
Four stimulation paradigms are illustrated for the GLIF model
with externally applied current and spikes impinging on the neuron.
Voltage traces, injected current traces, threshold traces, synaptic
conductance traces and spikes are shown.
KEYWORDS: glif_cond
"""
##############################################################################
# First, we import all necessary modules to simulate, analyze and plot this
# example.
import nest
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
##############################################################################
# We initialize the NEST kernel and set the simulation resolution.
nest.ResetKernel()
resolution = 0.05
nest.SetKernelStatus({"resolution": resolution})
###############################################################################
# We create the five levels of GLIF model to be tested, i.e.,
# ``lif``, ``lif_r``, ``lif_asc``, ``lif_r_asc``, ``lif_r_asc_a``.
# For each level of GLIF model, we create a ``glif_cond`` node. The node is
# created by setting relative model mechanism parameters. Other neuron
# parameters are set as default. The five ``glif_cond`` node handles are
# combined as a list. Note that the default number of synaptic ports
# is two for spike inputs. One port is an excitatory receptor with a time
# constant of 0.2 ms and a reversal potential of 0.0 mV. The other port is an
# inhibitory receptor with a time constant of 2.0 ms and a reversal potential
# of -85.0 mV.
# Note that users can set as many synaptic ports as needed for ``glif_cond``
# by setting array parameters ``tau_syn`` and ``E_rev`` of the model.
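# As a purely illustrative sketch (not executed here, so that the node IDs used
# below stay unchanged), a neuron with a third, slower excitatory port could be
# requested as:
#
#     nest.Create("glif_cond",
#                 params={"tau_syn": [0.2, 2.0, 5.0],
#                         "E_rev": [0.0, -85.0, 0.0]})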
n_lif = nest.Create("glif_cond",
params={"spike_dependent_threshold": False,
"after_spike_currents": False,
"adapting_threshold": False})
n_lif_r = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": False,
"adapting_threshold": False})
n_lif_asc = nest.Create("glif_cond",
params={"spike_dependent_threshold": False,
"after_spike_currents": True,
"adapting_threshold": False})
n_lif_r_asc = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": False})
n_lif_r_asc_a = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": True})
neurons = n_lif + n_lif_r + n_lif_asc + n_lif_r_asc + n_lif_r_asc_a
###############################################################################
# For the stimulation input to the glif_cond neurons, we create one excitatory
# spike generator and one inhibitory spike generator, each of which generates
# three spikes; we also create one step current generator, a Poisson
# generator, and a parrot neuron (to be paired with the Poisson generator).
# The three different injections are spread to three different time periods,
# i.e., 0 ms ~ 200 ms, 200 ms ~ 500 ms, 600 ms ~ 900 ms.
# Configuration of the current generator includes the definition of the start
# and stop times and the amplitude of the injected current. Configuration of
# the Poisson generator includes the definition of the start and stop times and
# the rate of the injected spike train.
espikes = nest.Create("spike_generator",
params={"spike_times": [10., 100., 150.],
"spike_weights": [20.]*3})
ispikes = nest.Create("spike_generator",
params={"spike_times": [15., 99., 150.],
"spike_weights": [-20.]*3})
cg = nest.Create("step_current_generator",
params={"amplitude_values": [400., ],
"amplitude_times": [200., ],
"start": 200., "stop": 500.})
pg = nest.Create("poisson_generator",
params={"rate": 15000., "start": 600., "stop": 900.})
pn = nest.Create("parrot_neuron")
###############################################################################
# The generators are then connected to the neurons. Specification of
# the ``receptor_type`` uniquely defines the target receptor.
# We connect the current generator to receptor 0, the excitatory spike
# generator and the Poisson generator (via the parrot neuron) to receptor 1,
# and the inhibitory spike generator to receptor 2 of the GLIF neurons.
# Note that the Poisson generator is connected to the parrot neuron to relay
# its spikes to the glif_cond neurons.
nest.Connect(cg, neurons, syn_spec={"delay": resolution})
nest.Connect(espikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 1})
nest.Connect(ispikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 2})
nest.Connect(pg, pn, syn_spec={"delay": resolution})
nest.Connect(pn, neurons, syn_spec={"delay": resolution, "receptor_type": 1})
###############################################################################
# A ``multimeter`` is created and connected to the neurons. The parameters
# specified for the multimeter include the list of quantities that should be
# recorded and the time interval at which quantities are measured.
mm = nest.Create("multimeter",
params={"interval": resolution,
"record_from": ["V_m", "I", "g_1", "g_2",
"threshold",
"threshold_spike",
"threshold_voltage",
"ASCurrents_sum"]})
nest.Connect(mm, neurons)
###############################################################################
# A ``spike_recorder`` is created and connected to the neurons to record the
# spikes generated by the glif_cond neurons.
sr = nest.Create("spike_recorder")
nest.Connect(neurons, sr)
###############################################################################
# Run the simulation for 1000 ms and retrieve recorded data from
# the multimeter and spike recorder.
nest.Simulate(1000.)
data = mm.events
senders = data["senders"]
spike_data = sr.events
spike_senders = spike_data["senders"]
spikes = spike_data["times"]
###############################################################################
# We plot the time traces of the membrane potential (in blue) and
# the overall threshold (in green), and the spikes (as red dots) in one panel;
# the spike component of threshold (in yellow) and the voltage component of
# threshold (in black) in another panel; the injected currents (in strong blue),
# the sum of after spike currents (in cyan) in the third panel; and the synaptic
# conductances of the two receptors (in blue and orange) in responding to the
# spike inputs to the neurons in the fourth panel. We plot all these four
# panels for each level of GLIF model in a separate figure.
glif_models = ["lif", "lif_r", "lif_asc", "lif_r_asc", "lif_r_asc_a"]
for i in range(len(glif_models)):
glif_model = glif_models[i]
node_id = neurons[i].global_id
plt.figure(glif_model)
gs = gridspec.GridSpec(4, 1, height_ratios=[2, 1, 1, 1])
t = data["times"][senders == 1]
ax1 = plt.subplot(gs[0])
plt.plot(t, data["V_m"][senders == node_id], "b")
plt.plot(t, data["threshold"][senders == node_id], "g--")
plt.plot(spikes[spike_senders == node_id],
[max(data["threshold"][senders == node_id]) * 0.95] *
len(spikes[spike_senders == node_id]), "r.")
plt.legend(["V_m", "threshold", "spike"])
plt.ylabel("V (mV)")
plt.title("Simulation of glif_cond neuron of " + glif_model)
ax2 = plt.subplot(gs[1])
plt.plot(t, data["threshold_spike"][senders == node_id], "y")
plt.plot(t, data["threshold_voltage"][senders == node_id], "k--")
plt.legend(["threshold_spike", "threshold_voltage"])
plt.ylabel("V (mV)")
ax3 = plt.subplot(gs[2])
plt.plot(t, data["I"][senders == node_id], "--")
plt.plot(t, data["ASCurrents_sum"][senders == node_id], "c-.")
plt.legend(["I_e", "ASCurrents_sum", "I_syn"])
plt.ylabel("I (pA)")
plt.xlabel("t (ms)")
ax4 = plt.subplot(gs[3])
plt.plot(t, data["g_1"][senders == node_id], "-")
plt.plot(t, data["g_2"][senders == node_id], "--")
plt.legend(["G_1", "G_2"])
plt.ylabel("G (nS)")
plt.xlabel("t (ms)")
plt.show()
|
gpl-2.0
|
enigmampc/catalyst
|
catalyst/data/resample.py
|
1
|
26992
|
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from six import with_metaclass
from catalyst.data._resample import (
_minute_to_session_open,
_minute_to_session_high,
_minute_to_session_low,
_minute_to_session_close,
_minute_to_session_volume,
)
from catalyst.data.bar_reader import NoDataOnDate
from catalyst.data.minute_bars import MinuteBarReader
from catalyst.data.session_bars import SessionBarReader
from catalyst.utils.memoize import lazyval
_MINUTE_TO_SESSION_OHCLV_HOW = OrderedDict((
('open', 'first'),
('high', 'max'),
('low', 'min'),
('close', 'last'),
('volume', 'sum'),
))
def minute_frame_to_session_frame(minute_frame, calendar):
"""
Resample a DataFrame with minute data into the frame expected by a
BcolzDailyBarWriter.
Parameters
----------
minute_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `dt` (minute dts)
calendar : catalyst.utils.calendars.trading_calendar.TradingCalendar
        A TradingCalendar whose session labels are used to resample from
        minute to session frequency.
    Returns
    -------
session_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `day` (datetime-like).
"""
how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c])
for c in minute_frame.columns)
return minute_frame.groupby(calendar.minute_to_session_label).agg(how)
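# A minimal usage sketch (with hypothetical data and a stand-in for the
# calendar's minute_to_session_label): the helper simply groups the minute
# frame by session label and aggregates with the OHLCV rules defined above.
def _example_minute_frame_to_session_frame():
    minutes = pd.date_range('2016-03-17 13:31', periods=4, freq='T', tz='UTC')
    minute_frame = pd.DataFrame(
        {'open': [10., 11., 12., 13.],
         'high': [10., 12., 12., 14.],
         'low': [9., 11., 11., 13.],
         'close': [10., 12., 11., 14.],
         'volume': [100, 200, 0, 50]},
        index=minutes,
    )
    # Stand-in for calendar.minute_to_session_label: map each minute to its date.
    session_label = lambda dt: dt.normalize()
    how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c])
                      for c in minute_frame.columns)
    # One session row: open=10, high=14, low=9, close=14, volume=350.
    return minute_frame.groupby(session_label).agg(how)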
def minute_to_session(column, close_locs, data, out):
"""
Resample an array with minute data into an array with session data.
This function assumes that the minute data is the exact length of all
minutes in the sessions in the output.
Parameters
----------
column : str
The `open`, `high`, `low`, `close`, or `volume` column.
close_locs : array[intp]
The locations in `data` which are the market close minutes.
data : array[float64|uint32]
The minute data to be sampled into session data.
        The first value should align with the market open of the first session
        and the last value with the market close of the last session, with
        values for all minutes of all sessions in between.
out : array[float64|uint32]
The output array into which to write the sampled sessions.
"""
if column == 'open':
_minute_to_session_open(close_locs, data, out)
elif column == 'high':
_minute_to_session_high(close_locs, data, out)
elif column == 'low':
_minute_to_session_low(close_locs, data, out)
elif column == 'close':
_minute_to_session_close(close_locs, data, out)
elif column == 'volume':
_minute_to_session_volume(close_locs, data, out)
return out
class DailyHistoryAggregator(object):
"""
Converts minute pricing data into a daily summary, to be used for the
last slot in a call to history with a frequency of `1d`.
This summary is the same as a daily bar rollup of minute data, with the
distinction that the summary is truncated to the `dt` requested.
    i.e., the aggregation slides forward during the course of the simulation day.
Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.
    The aggregation rules for each price type are documented in their
    respective methods.
    """
def __init__(self, market_opens, minute_reader, trading_calendar):
self._market_opens = market_opens
self._minute_reader = minute_reader
self._trading_calendar = trading_calendar
# The caches are structured as (date, market_open, entries), where
# entries is a dict of asset -> (last_visited_dt, value)
#
# Whenever an aggregation method determines the current value,
# the entry for the respective asset should be overwritten with a new
# entry for the current dt.value (int) and aggregation value.
#
# When the requested dt's date is different from date the cache is
# flushed, so that the cache entries do not grow unbounded.
#
# Example cache:
# cache = (date(2016, 3, 17),
# pd.Timestamp('2016-03-17 13:31', tz='UTC'),
# {
# 1: (1458221460000000000, np.nan),
# 2: (1458221460000000000, 42.0),
# })
self._caches = {
'open': None,
'high': None,
'low': None,
'close': None,
'volume': None
}
# The int value is used for deltas to avoid extra computation from
# creating new Timestamps.
self._one_min = pd.Timedelta('1 min').value
def _prelude(self, dt, field):
session = self._trading_calendar.minute_to_session_label(dt)
dt_value = dt.value
cache = self._caches[field]
if cache is None or cache[0] != session:
market_open = self._market_opens.loc[session]
cache = self._caches[field] = (session, market_open, {})
_, market_open, entries = cache
try:
market_open = market_open.tz_localize('UTC')
except TypeError:
market_open = market_open.tz_convert('UTC')
if dt != market_open:
prev_dt = dt_value - self._one_min
else:
prev_dt = None
return market_open, prev_dt, dt_value, entries
def opens(self, assets, dt):
"""
The open field's aggregation returns the first value that occurs
for the day, if there has been no data on or before the `dt` the open
is `nan`.
Once the first non-nan open is seen, that value remains constant per
asset for the remainder of the day.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')
opens = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
opens.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'open')
entries[asset] = (dt_value, val)
opens.append(val)
continue
else:
try:
last_visited_dt, first_open = entries[asset]
if last_visited_dt == dt_value:
opens.append(first_open)
continue
elif not pd.isnull(first_open):
opens.append(first_open)
entries[asset] = (dt_value, first_open)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['open'],
after_last,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['open'],
market_open,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
return np.array(opens)
def highs(self, assets, dt):
"""
The high field's aggregation returns the largest high seen between
the market open and the current dt.
If there has been no data on or before the `dt` the high is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')
highs = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
highs.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'high')
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
try:
last_visited_dt, last_max = entries[asset]
if last_visited_dt == dt_value:
highs.append(last_max)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'high')
if pd.isnull(curr_val):
val = last_max
elif pd.isnull(last_max):
val = curr_val
else:
val = max(last_max, curr_val)
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['high'],
after_last,
dt,
[asset],
)[0].T
val = np.nanmax(np.append(window, last_max))
entries[asset] = (dt_value, val)
highs.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['high'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmax(window)
entries[asset] = (dt_value, val)
highs.append(val)
continue
return np.array(highs)
def lows(self, assets, dt):
"""
The low field's aggregation returns the smallest low seen between
the market open and the current dt.
If there has been no data on or before the `dt` the low is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')
lows = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
lows.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'low')
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
try:
last_visited_dt, last_min = entries[asset]
if last_visited_dt == dt_value:
lows.append(last_min)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'low')
val = np.nanmin([last_min, curr_val])
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['low'],
after_last,
dt,
[asset],
)[0].T
val = np.nanmin(np.append(window, last_min))
entries[asset] = (dt_value, val)
lows.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['low'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmin(window)
entries[asset] = (dt_value, val)
lows.append(val)
continue
return np.array(lows)
def closes(self, assets, dt):
"""
The close field's aggregation returns the latest close at the given
dt.
If the close for the given dt is `nan`, the most recent non-nan
`close` is used.
If there has been no data on or before the `dt` the close is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')
closes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
def _get_filled_close(asset):
"""
Returns the most recent non-nan close for the asset in this
session. If there has been no data in this session on or before the
`dt`, returns `nan`
"""
window = self._minute_reader.load_raw_arrays(
['close'],
market_open,
dt,
[asset],
)[0]
try:
return window[~np.isnan(window)][-1]
except IndexError:
return np.NaN
for asset in assets:
if not asset.is_alive_for_session(session_label):
closes.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'close')
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
try:
last_visited_dt, last_close = entries[asset]
if last_visited_dt == dt_value:
closes.append(last_close)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = last_close
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
except KeyError:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
return np.array(closes)
def volumes(self, assets, dt):
"""
The volume field's aggregation returns the sum of all volumes
between the market open and the `dt`
If there has been no data on or before the `dt` the volume is 0.
Returns
-------
np.array with dtype=int64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')
volumes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
volumes.append(0)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'volume')
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
try:
last_visited_dt, last_total = entries[asset]
if last_visited_dt == dt_value:
volumes.append(last_total)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'volume')
val += last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['volume'],
after_last,
dt,
[asset],
)[0]
val = np.nansum(window) + last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['volume'],
market_open,
dt,
[asset],
)[0]
val = np.nansum(window)
entries[asset] = (dt_value, val)
volumes.append(val)
continue
return np.array(volumes)
class MinuteResampleSessionBarReader(SessionBarReader):
def __init__(self, calendar, minute_bar_reader):
self._calendar = calendar
self._minute_bar_reader = minute_bar_reader
def _get_resampled(self, columns, start_session, end_session, assets):
range_open = self._calendar.session_open(start_session)
range_close = self._calendar.session_close(end_session)
minute_data = self._minute_bar_reader.load_raw_arrays(
columns,
range_open,
range_close,
assets,
)
# Get the index of the close minute for each session in the range.
# If the range contains only one session, the only close in the range
# is the last minute in the data. Otherwise, we need to get all the
# session closes and find their indices in the range of minutes.
if start_session == end_session:
close_ilocs = np.array([len(minute_data[0]) - 1], dtype=np.int64)
else:
minutes = self._calendar.minutes_in_range(
range_open,
range_close,
)
session_closes = self._calendar.session_closes_in_range(
start_session,
end_session,
)
close_ilocs = minutes.searchsorted(session_closes.values)
results = []
shape = (len(close_ilocs), len(assets))
for col in columns:
if col != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
results.append(out)
for i in range(len(assets)):
for j, column in enumerate(columns):
data = minute_data[j][:, i]
minute_to_session(column, close_ilocs, data, results[j][:, i])
return results
@property
def trading_calendar(self):
return self._calendar
def load_raw_arrays(self, columns, start_dt, end_dt, sids):
return self._get_resampled(columns, start_dt, end_dt, sids)
def get_value(self, sid, session, colname):
# WARNING: This will need caching or other optimization if used in a
# tight loop.
# This was developed to complete interface, but has not been tuned
# for real world use.
return self._get_resampled([colname], session, session, [sid])[0][0][0]
@lazyval
def sessions(self):
cal = self._calendar
first = self._minute_bar_reader.first_trading_day
last = cal.minute_to_session_label(
self._minute_bar_reader.last_available_dt)
return cal.sessions_in_range(first, last)
@lazyval
def last_available_dt(self):
return self.trading_calendar.minute_to_session_label(
self._minute_bar_reader.last_available_dt
)
@property
def first_trading_day(self):
return self._minute_bar_reader.first_trading_day
def get_last_traded_dt(self, asset, dt):
return self.trading_calendar.minute_to_session_label(
self._minute_bar_reader.get_last_traded_dt(asset, dt))
class ReindexBarReader(with_metaclass(ABCMeta)):
"""
    A base class for readers which reindex results, filling in the additional
    indices with empty data.
    Used to align reading of assets which trade on different calendars.
Currently only supports a ``trading_calendar`` which is a superset of the
``reader``'s calendar.
Parameters
----------
- trading_calendar : catalyst.utils.trading_calendar.TradingCalendar
The calendar to use when indexing results from the reader.
- reader : MinuteBarReader|SessionBarReader
The reader which has a calendar that is a subset of the desired
``trading_calendar``.
- first_trading_session : pd.Timestamp
The first trading session the reader should provide. Must be specified,
since the ``reader``'s first session may not exactly align with the
desired calendar. Specifically, in the case where the first session
on the target calendar is a holiday on the ``reader``'s calendar.
- last_trading_session : pd.Timestamp
The last trading session the reader should provide. Must be specified,
since the ``reader``'s last session may not exactly align with the
desired calendar. Specifically, in the case where the last session
on the target calendar is a holiday on the ``reader``'s calendar.
"""
def __init__(self,
trading_calendar,
reader,
first_trading_session,
last_trading_session):
self._trading_calendar = trading_calendar
self._reader = reader
self._first_trading_session = first_trading_session
self._last_trading_session = last_trading_session
@property
def last_available_dt(self):
return self._reader.last_available_dt
def get_last_traded_dt(self, sid, dt):
return self._reader.get_last_traded_dt(sid, dt)
@property
def first_trading_day(self):
return self._reader.first_trading_day
def get_value(self, sid, dt, field):
# Give an empty result if no data is present.
try:
return self._reader.get_value(sid, dt, field)
except NoDataOnDate:
if field == 'volume':
return 0
else:
return np.nan
@abstractmethod
def _outer_dts(self, start_dt, end_dt):
raise NotImplementedError
@abstractmethod
def _inner_dts(self, start_dt, end_dt):
raise NotImplementedError
@property
def trading_calendar(self):
return self._trading_calendar
@lazyval
def sessions(self):
return self.trading_calendar.sessions_in_range(
self._first_trading_session,
self._last_trading_session
)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
outer_dts = self._outer_dts(start_dt, end_dt)
inner_dts = self._inner_dts(start_dt, end_dt)
indices = outer_dts.searchsorted(inner_dts)
shape = len(outer_dts), len(sids)
outer_results = []
if len(inner_dts) > 0:
inner_results = self._reader.load_raw_arrays(
fields, inner_dts[0], inner_dts[-1], sids)
else:
inner_results = None
for i, field in enumerate(fields):
if field != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
if inner_results is not None:
out[indices] = inner_results[i]
outer_results.append(out)
return outer_results
class ReindexMinuteBarReader(ReindexBarReader, MinuteBarReader):
"""
See: ``ReindexBarReader``
"""
def _outer_dts(self, start_dt, end_dt):
return self._trading_calendar.minutes_in_range(start_dt, end_dt)
def _inner_dts(self, start_dt, end_dt):
return self._reader.calendar.minutes_in_range(start_dt, end_dt)
class ReindexSessionBarReader(ReindexBarReader, SessionBarReader):
"""
See: ``ReindexBarReader``
"""
def _outer_dts(self, start_dt, end_dt):
return self.trading_calendar.sessions_in_range(start_dt, end_dt)
def _inner_dts(self, start_dt, end_dt):
return self._reader.trading_calendar.sessions_in_range(
start_dt, end_dt)
|
apache-2.0
|
lobnek/pyutil
|
pyutil/timeseries/merge.py
|
1
|
1293
|
import pandas as pd
def merge(new, old=None):
    # Merge new and old data; values from `new` take precedence on overlapping index entries.
if new is not None:
if old is not None:
x = pd.concat((new, old), sort=True)
return x.groupby(x.index).first().sort_index()
else:
return new
else:
return old
# also works for frames, etc.
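# A small usage sketch (hypothetical series): values from `new` win on
# overlapping dates, while dates only present in `old` are kept.
#
#   old = pd.Series([1.0, 2.0], index=pd.to_datetime(["2020-01-01", "2020-01-02"]))
#   new = pd.Series([5.0, 6.0], index=pd.to_datetime(["2020-01-02", "2020-01-03"]))
#   merge(new, old)
#   # 2020-01-01    1.0
#   # 2020-01-02    5.0
#   # 2020-01-03    6.0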
def last_index(ts, default=None):
try:
        # falls back to `default` if the series is empty or has no valid entries
return ts.last_valid_index() or default
except AttributeError:
return default
def first_index(ts, default=None):
"""
Return the first valid index of a Series or DataFrame
:param ts:
:param default: Use if the series has no valid entries or if the argument is None
:return:
"""
try:
return ts.first_valid_index() or default
except AttributeError:
return default
def to_datetime(ts=None):
try:
ts.index = pd.to_datetime(ts.index)
return ts
except AttributeError:
return None
def to_date(ts=None, format=None):
try:
if format:
ts.index = [t.strftime(format) for t in to_datetime(ts).index]
else:
ts.index = [t.date() for t in to_datetime(ts).index]
return ts
except AttributeError:
return None
|
mit
|
public-ink/public-ink
|
server/appengine/lib/mpl_toolkits/axes_grid1/parasite_axes.py
|
10
|
15472
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
import matplotlib.transforms as mtransforms
import matplotlib.collections as mcoll
import matplotlib.legend as mlegend
from matplotlib.axes import subplot_class_factory
from .mpl_axes import Axes
from matplotlib.transforms import Bbox
import numpy as np
import matplotlib.cbook as cbook
is_string_like = cbook.is_string_like
class ParasiteAxesBase(object):
def get_images_artists(self):
artists = set([a for a in self.get_children() if a.get_visible()])
images = set([a for a in self.images if a.get_visible()])
return list(images), list(artists - images)
def __init__(self, parent_axes, **kargs):
self._parent_axes = parent_axes
kargs.update(dict(frameon=False))
self._get_base_axes_attr("__init__")(self, parent_axes.figure,
parent_axes._position, **kargs)
def cla(self):
self._get_base_axes_attr("cla")(self)
martist.setp(self.get_children(), visible=False)
self._get_lines = self._parent_axes._get_lines
# In mpl's Axes, zorders of x- and y-axis are originally set
# within Axes.draw().
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
_parasite_axes_classes = {}
def parasite_axes_class_factory(axes_class=None):
if axes_class is None:
axes_class = Axes
new_class = _parasite_axes_classes.get(axes_class)
if new_class is None:
def _get_base_axes_attr(self, attrname):
return getattr(axes_class, attrname)
new_class = type(str("%sParasite" % (axes_class.__name__)),
(ParasiteAxesBase, axes_class),
{'_get_base_axes_attr': _get_base_axes_attr})
_parasite_axes_classes[axes_class] = new_class
return new_class
ParasiteAxes = parasite_axes_class_factory()
# #class ParasiteAxes(ParasiteAxesBase, Axes):
# @classmethod
# def _get_base_axes_attr(cls, attrname):
# return getattr(Axes, attrname)
class ParasiteAxesAuxTransBase(object):
def __init__(self, parent_axes, aux_transform, viewlim_mode=None,
**kwargs):
self.transAux = aux_transform
self.set_viewlim_mode(viewlim_mode)
self._parasite_axes_class.__init__(self, parent_axes, **kwargs)
def _set_lim_and_transforms(self):
self.transAxes = self._parent_axes.transAxes
self.transData = \
self.transAux + \
self._parent_axes.transData
self._xaxis_transform = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
def set_viewlim_mode(self, mode):
if mode not in [None, "equal", "transform"]:
raise ValueError("Unknown mode : %s" % (mode,))
else:
self._viewlim_mode = mode
def get_viewlim_mode(self):
return self._viewlim_mode
def update_viewlim(self):
viewlim = self._parent_axes.viewLim.frozen()
mode = self.get_viewlim_mode()
if mode is None:
pass
elif mode == "equal":
self.axes.viewLim.set(viewlim)
elif mode == "transform":
self.axes.viewLim.set(viewlim.transformed(self.transAux.inverted()))
else:
raise ValueError("Unknown mode : %s" % (self._viewlim_mode,))
def _pcolor(self, method_name, *XYC, **kwargs):
if len(XYC) == 1:
C = XYC[0]
ny, nx = C.shape
gx = np.arange(-0.5, nx, 1.)
gy = np.arange(-0.5, ny, 1.)
X, Y = np.meshgrid(gx, gy)
else:
X, Y, C = XYC
pcolor_routine = self._get_base_axes_attr(method_name)
if "transform" in kwargs:
mesh = pcolor_routine(self, X, Y, C, **kwargs)
else:
orig_shape = X.shape
xy = np.vstack([X.flat, Y.flat])
xyt=xy.transpose()
wxy = self.transAux.transform(xyt)
gx, gy = wxy[:,0].reshape(orig_shape), wxy[:,1].reshape(orig_shape)
mesh = pcolor_routine(self, gx, gy, C, **kwargs)
mesh.set_transform(self._parent_axes.transData)
return mesh
def pcolormesh(self, *XYC, **kwargs):
return self._pcolor("pcolormesh", *XYC, **kwargs)
def pcolor(self, *XYC, **kwargs):
return self._pcolor("pcolor", *XYC, **kwargs)
def _contour(self, method_name, *XYCL, **kwargs):
if len(XYCL) <= 2:
C = XYCL[0]
ny, nx = C.shape
gx = np.arange(0., nx, 1.)
gy = np.arange(0., ny, 1.)
X,Y = np.meshgrid(gx, gy)
CL = XYCL
else:
X, Y = XYCL[:2]
CL = XYCL[2:]
contour_routine = self._get_base_axes_attr(method_name)
if "transform" in kwargs:
cont = contour_routine(self, X, Y, *CL, **kwargs)
else:
orig_shape = X.shape
xy = np.vstack([X.flat, Y.flat])
xyt=xy.transpose()
wxy = self.transAux.transform(xyt)
gx, gy = wxy[:,0].reshape(orig_shape), wxy[:,1].reshape(orig_shape)
cont = contour_routine(self, gx, gy, *CL, **kwargs)
for c in cont.collections:
c.set_transform(self._parent_axes.transData)
return cont
def contour(self, *XYCL, **kwargs):
return self._contour("contour", *XYCL, **kwargs)
def contourf(self, *XYCL, **kwargs):
return self._contour("contourf", *XYCL, **kwargs)
def apply_aspect(self, position=None):
self.update_viewlim()
self._get_base_axes_attr("apply_aspect")(self)
#ParasiteAxes.apply_aspect()
_parasite_axes_auxtrans_classes = {}
def parasite_axes_auxtrans_class_factory(axes_class=None):
if axes_class is None:
parasite_axes_class = ParasiteAxes
elif not issubclass(axes_class, ParasiteAxesBase):
parasite_axes_class = parasite_axes_class_factory(axes_class)
else:
parasite_axes_class = axes_class
new_class = _parasite_axes_auxtrans_classes.get(parasite_axes_class)
if new_class is None:
new_class = type(str("%sParasiteAuxTrans" % (parasite_axes_class.__name__)),
(ParasiteAxesAuxTransBase, parasite_axes_class),
{'_parasite_axes_class': parasite_axes_class,
'name': 'parasite_axes'})
_parasite_axes_auxtrans_classes[parasite_axes_class] = new_class
return new_class
ParasiteAxesAuxTrans = parasite_axes_auxtrans_class_factory(axes_class=ParasiteAxes)
def _get_handles(ax):
handles = ax.lines[:]
handles.extend(ax.patches)
handles.extend([c for c in ax.collections
if isinstance(c, mcoll.LineCollection)])
handles.extend([c for c in ax.collections
if isinstance(c, mcoll.RegularPolyCollection)])
handles.extend([c for c in ax.collections
if isinstance(c, mcoll.CircleCollection)])
return handles
class HostAxesBase(object):
def __init__(self, *args, **kwargs):
self.parasites = []
self._get_base_axes_attr("__init__")(self, *args, **kwargs)
def get_aux_axes(self, tr, viewlim_mode="equal", axes_class=None):
parasite_axes_class = parasite_axes_auxtrans_class_factory(axes_class)
ax2 = parasite_axes_class(self, tr, viewlim_mode)
# note that ax2.transData == tr + ax1.transData
        # Anything you draw in ax2 will match the ticks and grids of ax1.
self.parasites.append(ax2)
return ax2
def _get_legend_handles(self, legend_handler_map=None):
Axes_get_legend_handles = self._get_base_axes_attr("_get_legend_handles")
all_handles = list(Axes_get_legend_handles(self, legend_handler_map))
for ax in self.parasites:
all_handles.extend(ax._get_legend_handles(legend_handler_map))
return all_handles
def draw(self, renderer):
orig_artists = list(self.artists)
orig_images = list(self.images)
if hasattr(self, "get_axes_locator"):
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.set_position(pos, which="active")
self.apply_aspect(pos)
else:
self.apply_aspect()
else:
self.apply_aspect()
rect = self.get_position()
for ax in self.parasites:
ax.apply_aspect(rect)
images, artists = ax.get_images_artists()
self.images.extend(images)
self.artists.extend(artists)
self._get_base_axes_attr("draw")(self, renderer)
self.artists = orig_artists
self.images = orig_images
def cla(self):
for ax in self.parasites:
ax.cla()
self._get_base_axes_attr("cla")(self)
#super(HostAxes, self).cla()
def twinx(self, axes_class=None):
"""
        Create a twin of Axes with a shared x-axis but an independent
        y-axis. The y-axis of self will have ticks on the left and the
        returned axes will have ticks on the right.
"""
if axes_class is None:
axes_class = self._get_base_axes()
parasite_axes_class = parasite_axes_class_factory(axes_class)
ax2 = parasite_axes_class(self, sharex=self, frameon=False)
self.parasites.append(ax2)
# for normal axes
self.axis["right"].toggle(all=False)
self.axis["right"].line.set_visible(True)
ax2.axis["right"].set_visible(True)
ax2.axis["left","top", "bottom"].toggle(all=False)
ax2.axis["left","top", "bottom"].line.set_visible(False)
ax2.axis["right"].toggle(all=True)
ax2.axis["right"].line.set_visible(False)
return ax2
def twiny(self, axes_class=None):
"""
        Create a twin of Axes with a shared y-axis but an independent
        x-axis. The x-axis of self will have ticks on the bottom and the
        returned axes will have ticks on the top.
"""
if axes_class is None:
axes_class = self._get_base_axes()
parasite_axes_class = parasite_axes_class_factory(axes_class)
ax2 = parasite_axes_class(self, sharey=self, frameon=False)
self.parasites.append(ax2)
self.axis["top"].toggle(all=False)
self.axis["top"].line.set_visible(True)
ax2.axis["top"].set_visible(True)
ax2.axis["left","right", "bottom"].toggle(all=False)
ax2.axis["left","right", "bottom"].line.set_visible(False)
ax2.axis["top"].toggle(all=True)
ax2.axis["top"].line.set_visible(False)
return ax2
def twin(self, aux_trans=None, axes_class=None):
"""
        Create a twin of Axes whose data transform is linked to self through
        ``aux_trans``. The ticks of self are drawn on the left and bottom,
        while the returned axes draws its ticks on the top and right.
"""
if axes_class is None:
axes_class = self._get_base_axes()
parasite_axes_auxtrans_class = parasite_axes_auxtrans_class_factory(axes_class)
if aux_trans is None:
ax2 = parasite_axes_auxtrans_class(self, mtransforms.IdentityTransform(),
viewlim_mode="equal",
)
else:
ax2 = parasite_axes_auxtrans_class(self, aux_trans,
viewlim_mode="transform",
)
self.parasites.append(ax2)
# for normal axes
#self.yaxis.tick_left()
#self.xaxis.tick_bottom()
#ax2.yaxis.tick_right()
#ax2.xaxis.set_visible(True)
#ax2.yaxis.set_visible(True)
#ax2.yaxis.set_label_position('right')
##ax2.xaxis.tick_top()
#ax2.xaxis.set_label_position('top')
self.axis["top","right"].toggle(all=False)
self.axis["top","right"].line.set_visible(False)
#self.axis["left","bottom"].toggle(label=True)
ax2.axis["top","right"].set_visible(True)
ax2.axis["bottom","left"].toggle(all=False)
ax2.axis["bottom","left"].line.set_visible(False)
ax2.axis["top","right"].toggle(all=True)
ax2.axis["top","right"].line.set_visible(True)
# # for axisline axes
# self._axislines["right"].set_visible(False)
# self._axislines["top"].set_visible(False)
# ax2._axislines["left"].set_visible(False)
# ax2._axislines["bottom"].set_visible(False)
# ax2._axislines["right"].set_visible(True)
# ax2._axislines["top"].set_visible(True)
# ax2._axislines["right"].major_ticklabels.set_visible(True)
# ax2._axislines["top"].major_ticklabels.set_visible(True)
return ax2
def get_tightbbox(self, renderer, call_axes_locator=True):
bbs = [ax.get_tightbbox(renderer, call_axes_locator) \
for ax in self.parasites]
get_tightbbox = self._get_base_axes_attr("get_tightbbox")
bbs.append(get_tightbbox(self, renderer, call_axes_locator))
_bbox = Bbox.union([b for b in bbs if b.width!=0 or b.height!=0])
return _bbox
_host_axes_classes = {}
def host_axes_class_factory(axes_class=None):
if axes_class is None:
axes_class = Axes
new_class = _host_axes_classes.get(axes_class)
if new_class is None:
def _get_base_axes(self):
return axes_class
def _get_base_axes_attr(self, attrname):
return getattr(axes_class, attrname)
new_class = type(str("%sHostAxes" % (axes_class.__name__)),
(HostAxesBase, axes_class),
{'_get_base_axes_attr': _get_base_axes_attr,
'_get_base_axes': _get_base_axes})
_host_axes_classes[axes_class] = new_class
return new_class
def host_subplot_class_factory(axes_class):
host_axes_class = host_axes_class_factory(axes_class=axes_class)
subplot_host_class = subplot_class_factory(host_axes_class)
return subplot_host_class
HostAxes = host_axes_class_factory(axes_class=Axes)
SubplotHost = subplot_class_factory(HostAxes)
def host_axes(*args, **kwargs):
import matplotlib.pyplot as plt
axes_class = kwargs.pop("axes_class", None)
host_axes_class = host_axes_class_factory(axes_class)
fig = plt.gcf()
ax = host_axes_class(fig, *args, **kwargs)
fig.add_axes(ax)
plt.draw_if_interactive()
return ax
def host_subplot(*args, **kwargs):
import matplotlib.pyplot as plt
axes_class = kwargs.pop("axes_class", None)
host_subplot_class = host_subplot_class_factory(axes_class)
fig = plt.gcf()
ax = host_subplot_class(fig, *args, **kwargs)
fig.add_subplot(ax)
plt.draw_if_interactive()
return ax
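# A minimal usage sketch (hypothetical data; defined but not executed on
# import): a host axes plus one twinx parasite with an independent right-hand
# y-axis, both contributing entries to a single legend.
def _example_host_subplot():
    import matplotlib.pyplot as plt
    host = host_subplot(111)
    par = host.twinx()
    host.set_xlabel("x")
    host.set_ylabel("host scale")
    par.set_ylabel("parasite scale")
    host.plot([0, 1, 2], [0, 1, 4], label="host")
    par.plot([0, 1, 2], [40, 20, 10], label="parasite")
    host.legend()
    plt.show()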
|
gpl-3.0
|
NelisVerhoef/scikit-learn
|
benchmarks/bench_multilabel_metrics.py
|
276
|
7138
|
#!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
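# For example (a hypothetical direct call mirroring the CLI entry point below),
# timing a single metric on a single format:
#
#   out = benchmark(metrics=[METRICS['accuracy']], formats=[FORMATS['dense']],
#                   samples=1000, classes=4, density=.2, n_times=5)
#   out.shape  # -> (1, 1, 1, 1, 1)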
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
|
bsd-3-clause
|
BryanCutler/spark
|
python/pyspark/pandas/tests/plot/test_frame_plot_plotly.py
|
1
|
9856
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from distutils.version import LooseVersion
import pprint
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.pandas.testing.utils import ReusedSQLTestCase, TestUtils, have_plotly
from pyspark.pandas.utils import name_like_string
if have_plotly:
from plotly import express
import plotly.graph_objs as go
@unittest.skipIf(
not have_plotly or LooseVersion(pd.__version__) < "1.0.0",
"plotly is not installed or pandas<1.0. pandas<1.0 does not support latest plotly "
"and/or 'plotting.backend' option.",
)
class DataFramePlotPlotlyTest(ReusedSQLTestCase, TestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
pd.set_option("plotting.backend", "plotly")
set_option("plotting.backend", "plotly")
set_option("plotting.max_rows", 2000)
set_option("plotting.sample_ratio", None)
@classmethod
def tearDownClass(cls):
pd.reset_option("plotting.backend")
reset_option("plotting.backend")
reset_option("plotting.max_rows")
reset_option("plotting.sample_ratio")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50], "b": [2, 3, 4, 5, 7, 9, 10, 15, 34, 45, 49]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10],
)
@property
def kdf1(self):
return ps.from_pandas(self.pdf1)
def test_line_plot(self):
def check_line_plot(pdf, kdf):
self.assertEqual(pdf.plot(kind="line"), kdf.plot(kind="line"))
self.assertEqual(pdf.plot.line(), kdf.plot.line())
pdf1 = self.pdf1
kdf1 = self.kdf1
check_line_plot(pdf1, kdf1)
def test_area_plot(self):
def check_area_plot(pdf, kdf):
self.assertEqual(pdf.plot(kind="area"), kdf.plot(kind="area"))
self.assertEqual(pdf.plot.area(), kdf.plot.area())
pdf = self.pdf1
kdf = self.kdf1
check_area_plot(pdf, kdf)
def test_area_plot_y(self):
def check_area_plot_y(pdf, kdf, y):
self.assertEqual(pdf.plot.area(y=y), kdf.plot.area(y=y))
# test if frame area plot is correct when y is specified
pdf = pd.DataFrame(
{
"sales": [3, 2, 3, 9, 10, 6],
"signups": [5, 5, 6, 12, 14, 13],
"visits": [20, 42, 28, 62, 81, 50],
},
index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="M"),
)
kdf = ps.from_pandas(pdf)
check_area_plot_y(pdf, kdf, y="sales")
def test_barh_plot_with_x_y(self):
def check_barh_plot_with_x_y(pdf, kdf, x, y):
self.assertEqual(pdf.plot(kind="barh", x=x, y=y), kdf.plot(kind="barh", x=x, y=y))
self.assertEqual(pdf.plot.barh(x=x, y=y), kdf.plot.barh(x=x, y=y))
# this is testing plot with specified x and y
pdf1 = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
kdf1 = ps.from_pandas(pdf1)
check_barh_plot_with_x_y(pdf1, kdf1, x="lab", y="val")
def test_barh_plot(self):
def check_barh_plot(pdf, kdf):
self.assertEqual(pdf.plot(kind="barh"), kdf.plot(kind="barh"))
self.assertEqual(pdf.plot.barh(), kdf.plot.barh())
# this is testing when x or y is not assigned
pdf1 = pd.DataFrame({"lab": [20.1, 40.5, 60.6], "val": [10, 30, 20]})
kdf1 = ps.from_pandas(pdf1)
check_barh_plot(pdf1, kdf1)
def test_bar_plot(self):
def check_bar_plot(pdf, kdf):
self.assertEqual(pdf.plot(kind="bar"), kdf.plot(kind="bar"))
self.assertEqual(pdf.plot.bar(), kdf.plot.bar())
pdf1 = self.pdf1
kdf1 = self.kdf1
check_bar_plot(pdf1, kdf1)
def test_bar_with_x_y(self):
# this is testing plot with specified x and y
pdf = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
kdf = ps.from_pandas(pdf)
self.assertEqual(
pdf.plot(kind="bar", x="lab", y="val"), kdf.plot(kind="bar", x="lab", y="val")
)
self.assertEqual(pdf.plot.bar(x="lab", y="val"), kdf.plot.bar(x="lab", y="val"))
def test_scatter_plot(self):
def check_scatter_plot(pdf, kdf, x, y, c):
self.assertEqual(pdf.plot.scatter(x=x, y=y), kdf.plot.scatter(x=x, y=y))
self.assertEqual(pdf.plot(kind="scatter", x=x, y=y), kdf.plot(kind="scatter", x=x, y=y))
# check when keyword c is given as name of a column
self.assertEqual(
pdf.plot.scatter(x=x, y=y, c=c, s=50), kdf.plot.scatter(x=x, y=y, c=c, s=50)
)
# Use pandas scatter plot example
pdf1 = pd.DataFrame(np.random.rand(50, 4), columns=["a", "b", "c", "d"])
kdf1 = ps.from_pandas(pdf1)
check_scatter_plot(pdf1, kdf1, x="a", y="b", c="c")
def test_pie_plot(self):
def check_pie_plot(kdf):
pdf = kdf.to_pandas()
self.assertEqual(
kdf.plot(kind="pie", y=kdf.columns[0]),
express.pie(pdf, values="a", names=pdf.index),
)
self.assertEqual(
kdf.plot(kind="pie", values="a"), express.pie(pdf, values="a"),
)
kdf1 = self.kdf1
check_pie_plot(kdf1)
# TODO: support multi-index columns
# columns = pd.MultiIndex.from_tuples([("x", "y"), ("y", "z")])
# kdf1.columns = columns
# check_pie_plot(kdf1)
# TODO: support multi-index
# kdf1 = ps.DataFrame(
# {
# "a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],
# "b": [2, 3, 4, 5, 7, 9, 10, 15, 34, 45, 49]
# },
# index=pd.MultiIndex.from_tuples([("x", "y")] * 11),
# )
# check_pie_plot(kdf1)
def test_hist_plot(self):
def check_hist_plot(kdf):
bins = np.array([1.0, 5.9, 10.8, 15.7, 20.6, 25.5, 30.4, 35.3, 40.2, 45.1, 50.0])
data = [
np.array([5.0, 4.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
np.array([4.0, 3.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0]),
]
prev = bins[0]
text_bins = []
for b in bins[1:]:
text_bins.append("[%s, %s)" % (prev, b))
prev = b
text_bins[-1] = text_bins[-1][:-1] + "]"
bins = 0.5 * (bins[:-1] + bins[1:])
name_a = name_like_string(kdf.columns[0])
name_b = name_like_string(kdf.columns[1])
bars = [
go.Bar(
x=bins,
y=data[0],
name=name_a,
text=text_bins,
hovertemplate=("variable=" + name_a + "<br>value=%{text}<br>count=%{y}"),
),
go.Bar(
x=bins,
y=data[1],
name=name_b,
text=text_bins,
hovertemplate=("variable=" + name_b + "<br>value=%{text}<br>count=%{y}"),
),
]
fig = go.Figure(data=bars, layout=go.Layout(barmode="stack"))
fig["layout"]["xaxis"]["title"] = "value"
fig["layout"]["yaxis"]["title"] = "count"
self.assertEqual(
pprint.pformat(kdf.plot(kind="hist").to_dict()), pprint.pformat(fig.to_dict())
)
kdf1 = self.kdf1
check_hist_plot(kdf1)
columns = pd.MultiIndex.from_tuples([("x", "y"), ("y", "z")])
kdf1.columns = columns
check_hist_plot(kdf1)
def test_kde_plot(self):
kdf = ps.DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 3, 5, 7, 9], "c": [2, 4, 6, 8, 10]})
pdf = pd.DataFrame(
{
"Density": [
0.03515491,
0.06834979,
0.00663503,
0.02372059,
0.06834979,
0.01806934,
0.01806934,
0.06834979,
0.02372059,
],
"names": ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
"index": [-3.5, 5.5, 14.5, -3.5, 5.5, 14.5, -3.5, 5.5, 14.5],
}
)
actual = kdf.plot.kde(bw_method=5, ind=3)
expected = express.line(pdf, x="index", y="Density", color="names")
expected["layout"]["xaxis"]["title"] = None
self.assertEqual(pprint.pformat(actual.to_dict()), pprint.pformat(expected.to_dict()))
if __name__ == "__main__":
from pyspark.pandas.tests.plot.test_frame_plot_plotly import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
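# Editor's note: a hedged, illustrative sketch of the behaviour these tests
# assert, assuming a Spark session and plotly are available (mirrors the
# options set in setUpClass above):
#
#   >>> from pyspark.pandas.config import set_option
#   >>> set_option("plotting.backend", "plotly")
#   >>> kdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
#   >>> kdf.plot.line()   # expected to return a plotly Figure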
|
apache-2.0
|
krasch/smart-assistants
|
recsys/classifiers/bayes.py
|
1
|
6742
|
# -*- coding: UTF-8 -*-
import pandas
import numpy
from base import BaseClassifier
class NaiveBayesClassifier(BaseClassifier):
"""
Implements a standard Naive Bayes classifier.
"""
name = "NaiveBayes"
def __init__(self, features, target_names):
BaseClassifier.__init__(self, features, target_names)
    def fit(self, train_data, train_target):
"""
Train the classifier.
@param train_data: A matrix with len(self.features) columns and one row for each instance in the dataset. Each
row describes a user situation with current sensor settings and information on how long these settings have
not changed.
@param train_target: An array of targets (length of the array corresponds to the number of rows in train_data).
Each target represents the action that user performed in the situation described by the corresponding row
in train_data.
@return: self-reference for this classifier
"""
def calculate_priors(data):
"""
Count how often each target (user action) was seen overall. Performs additive smoothing and normalizes
the counts.
@param data:
@return:
"""
#count how often each target was seen, set to 0 if target has never been seen
counts_per_target = data.index.to_series().value_counts(sort=False)
counts_per_target = counts_per_target.reindex(self.target_names).fillna(0)
#additive smoothing (add one to every count), necessary so that NaiveBayes does not degrade for zero-counts
counts_per_target += 1
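            #e.g. raw counts [3, 0] become [4, 1]; after normalization below the priors are [0.8, 0.2]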
#normalize
normalized_counts = counts_per_target.div(counts_per_target.sum())
return normalized_counts.values
def calculate_counts_per_setting(data):
"""
Count how often each target was seen in each setting. Performs additive smoothing and normalizes
the counts.
@param data:
@return:
"""
#count how often each target was seen in each settings, set to 0 if target has never been seen in a setting
counts_per_setting = data.groupby(data.index, sort=False).sum()
counts_per_setting = counts_per_setting.reindex(self.target_names).fillna(0)
#additive smoothing (add one to every count), necessary so that NaiveBayes does not degrade for zero-counts
counts_per_setting += 1
#normalize the counts per sensor
normalize = lambda counts_per_sensor: counts_per_sensor.div(counts_per_sensor.sum(axis=1), axis=0)
normalized_counts = counts_per_setting.groupby(lambda (sensor, value): sensor, axis=1).transform(normalize)
#convert to dictionary of numpy arrays for faster calculations later on
normalized_counts = {setting: normalized_counts[setting].values for setting in normalized_counts.columns}
return normalized_counts
#load training data and targets into pandas dataframe
train_data = pandas.DataFrame(train_data)
train_data.columns = self.features
train_data.index = train_target
#keep only the columns with current sensor settings, since Naive Bayes does not use timedeltas
train_data = train_data[self.settings_columns]
#calculate how often each target was seen and how often it was seen in specific settings
self.priors = calculate_priors(train_data)
self.counts = calculate_counts_per_setting(train_data)
return self
def predict(self, test_data):
"""
Calculate recommendations for the test_data
@param test_data: A matrix with len(self.features) columns and one row for each instance in the dataset. Each
row describes a user situation with current sensor settings and information on how long these settings have
not changed.
        @return: Resulting recommendations for each instance in the dataset (a list of lists of strings).
"""
#load test data into pandas dataframe
test_data = pandas.DataFrame(test_data)
test_data.columns = self.features
#keep only the columns with current sensor settings, since Naive Bayes does not use timedeltas
test_data = test_data[self.settings_columns].values
#calculate sorted recommendations for one instance
def predict_for_instance(instance):
#find which sensor values are currently set
currently_set = self.currently_set(instance)
#calculate which targets (user actions) are currently possible (possible targets are represented by 1,
#not currently possible targets are represented by 0)
possible_targets_mask = self.possible_targets_mask(currently_set)
#lookup observations for the current settings
counts = [self.counts[setting] for setting in currently_set]
#calculate posteriors, set posteriors for not currently possible targets to zero and normalize
posteriors = reduce(numpy.multiply, counts) * self.priors
posteriors = posteriors * possible_targets_mask
normalized_posteriors = posteriors / posteriors.sum()
#map resulting posteriors to the possible targets and sort
recommendations = {target: posterior for target, posterior, target_is_possible
in zip(self.target_names, normalized_posteriors, possible_targets_mask)
if target_is_possible}
sorted_recommendations = sorted(recommendations, key=recommendations.get, reverse=True)
return sorted_recommendations
#calculate recommendations for every instance in the test dataset
results = [predict_for_instance(test_data[i]) for i in range(len(test_data))]
return results
def print_counts_and_priors(self):
"""
Simple debugging method that prints out the calculated counts and priors after the classifier has been trained.
@return:
"""
line_to_string = lambda target, count: "%s %.2f" % (target, count)
print "\n".join([line_to_string(target, count) for target, count in self.priors.iteritems()])
format_line = lambda target, (sensor, value), count: "%s %s %s %.2f" % (target, sensor, value, count)
output_for_target = lambda target: "\n".join([format_line(target, setting, count)
for setting, count in sorted(self.counts.loc[target].iteritems())])
print "\n".join([output_for_target(target) for target in sorted(self.counts.index)])
|
mit
|
jpautom/scikit-learn
|
sklearn/linear_model/tests/test_omp.py
|
272
|
7752
|
# Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
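# Editor's note: a hedged, minimal sketch of the estimator API exercised by
# test_estimator above (names reuse the module-level fixtures X, y and
# n_nonzero_coefs):
#
#   >>> omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
#   >>> omp.fit(X, y[:, 0])
#   >>> omp.coef_.shape   # (n_features,)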
|
bsd-3-clause
|
louisLouL/pair_trading
|
capstone_env/lib/python3.6/site-packages/mpl_toolkits/axes_grid1/anchored_artists.py
|
2
|
13214
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import docstring
from matplotlib.offsetbox import (AnchoredOffsetbox, AuxTransformBox,
DrawingArea, TextArea, VPacker)
from matplotlib.patches import Rectangle, Ellipse
__all__ = ['AnchoredDrawingArea', 'AnchoredAuxTransformBox',
'AnchoredEllipse', 'AnchoredSizeBar']
class AnchoredDrawingArea(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, width, height, xdescent, ydescent,
loc, pad=0.4, borderpad=0.5, prop=None, frameon=True,
**kwargs):
"""
An anchored container with a fixed size and fillable DrawingArea.
Artists added to the *drawing_area* will have their coordinates
interpreted as pixels. Any transformations set on the artists will be
overridden.
Parameters
----------
width, height : int or float
width and height of the container, in pixels.
xdescent, ydescent : int or float
descent of the container in the x- and y- direction, in pixels.
loc : int
Location of this artist. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the child objects, in fraction of the font
size. Defaults to 0.4.
borderpad : int or float, optional
Border padding, in fraction of the font size.
Defaults to 0.5.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, optional
            If True, draw a box around this artist. Defaults to True.
**kwargs :
            Keyword arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
drawing_area : `matplotlib.offsetbox.DrawingArea`
A container for artists to display.
Examples
--------
To display blue and red circles of different sizes in the upper right
of an axes *ax*:
>>> ada = AnchoredDrawingArea(20, 20, 0, 0, loc=1, frameon=False)
>>> ada.drawing_area.add_artist(Circle((10, 10), 10, fc="b"))
>>> ada.drawing_area.add_artist(Circle((30, 10), 5, fc="r"))
>>> ax.add_artist(ada)
"""
self.da = DrawingArea(width, height, xdescent, ydescent)
self.drawing_area = self.da
super(AnchoredDrawingArea, self).__init__(
loc, pad=pad, borderpad=borderpad, child=self.da, prop=None,
frameon=frameon, **kwargs
)
class AnchoredAuxTransformBox(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, transform, loc,
pad=0.4, borderpad=0.5, prop=None, frameon=True, **kwargs):
"""
An anchored container with transformed coordinates.
Artists added to the *drawing_area* are scaled according to the
coordinates of the transformation used. The dimensions of this artist
will scale to contain the artists added.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
loc : int
Location of this artist. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the child objects, in fraction of the font
size. Defaults to 0.4.
borderpad : int or float, optional
Border padding, in fraction of the font size.
Defaults to 0.5.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, optional
            If True, draw a box around this artist. Defaults to True.
**kwargs :
            Keyword arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
drawing_area : `matplotlib.offsetbox.AuxTransformBox`
A container for artists to display.
Examples
--------
To display an ellipse in the upper left, with a width of 0.1 and
height of 0.4 in data coordinates:
>>> box = AnchoredAuxTransformBox(ax.transData, loc=2)
>>> el = Ellipse((0,0), width=0.1, height=0.4, angle=30)
>>> box.drawing_area.add_artist(el)
>>> ax.add_artist(box)
"""
self.drawing_area = AuxTransformBox(transform)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=self.drawing_area,
prop=prop,
frameon=frameon,
**kwargs)
class AnchoredEllipse(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, transform, width, height, angle, loc,
pad=0.1, borderpad=0.1, prop=None, frameon=True, **kwargs):
"""
Draw an anchored ellipse of a given size.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
width, height : int or float
Width and height of the ellipse, given in coordinates of
*transform*.
angle : int or float
Rotation of the ellipse, in degrees, anti-clockwise.
loc : int
Location of this size bar. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the ellipse, in fraction of the font size. Defaults
to 0.1.
borderpad : int or float, optional
Border padding, in fraction of the font size. Defaults to 0.1.
frameon : bool, optional
If True, draw a box around the ellipse. Defaults to True.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
**kwargs :
            Keyword arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
ellipse : `matplotlib.patches.Ellipse`
Ellipse patch drawn.
"""
self._box = AuxTransformBox(transform)
self.ellipse = Ellipse((0, 0), width, height, angle)
self._box.add_artist(self.ellipse)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=self._box,
prop=prop,
frameon=frameon, **kwargs)
class AnchoredSizeBar(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, transform, size, label, loc,
pad=0.1, borderpad=0.1, sep=2,
frameon=True, size_vertical=0, color='black',
label_top=False, fontproperties=None, fill_bar=None,
**kwargs):
"""
Draw a horizontal scale bar with a center-aligned label underneath.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
size : int or float
Horizontal length of the size bar, given in coordinates of
*transform*.
label : str
Label to display.
loc : int
Location of this size bar. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the label and size bar, in fraction of the font
size. Defaults to 0.1.
borderpad : int or float, optional
Border padding, in fraction of the font size.
Defaults to 0.1.
sep : int or float, optional
            Separation between the label and the size bar, in points.
Defaults to 2.
frameon : bool, optional
If True, draw a box around the horizontal bar and label.
Defaults to True.
size_vertical : int or float, optional
Vertical length of the size bar, given in coordinates of
*transform*. Defaults to 0.
color : str, optional
Color for the size bar and label.
Defaults to black.
label_top : bool, optional
If True, the label will be over the size bar.
Defaults to False.
fontproperties : `matplotlib.font_manager.FontProperties`, optional
Font properties for the label text.
fill_bar : bool, optional
If True and if size_vertical is nonzero, the size bar will
            be filled in with the specified *color*.
Defaults to True if `size_vertical` is greater than
zero and False otherwise.
**kwargs :
            Keyword arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
size_bar : `matplotlib.offsetbox.AuxTransformBox`
Container for the size bar.
txt_label : `matplotlib.offsetbox.TextArea`
Container for the label of the size bar.
Notes
-----
        If *prop* is passed as a keyword argument, but *fontproperties* is
        not, then *prop* is assumed to be the intended *fontproperties*.
Using both *prop* and *fontproperties* is not supported.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from mpl_toolkits.axes_grid1.anchored_artists import \
AnchoredSizeBar
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.random.random((10,10)))
>>> bar = AnchoredSizeBar(ax.transData, 3, '3 data units', 4)
>>> ax.add_artist(bar)
>>> fig.show()
Using all the optional parameters
>>> import matplotlib.font_manager as fm
>>> fontprops = fm.FontProperties(size=14, family='monospace')
>>> bar = AnchoredSizeBar(ax.transData, 3, '3 units', 4, pad=0.5, \
sep=5, borderpad=0.5, frameon=False, \
size_vertical=0.5, color='white', \
fontproperties=fontprops)
"""
if fill_bar is None:
fill_bar = size_vertical > 0
self.size_bar = AuxTransformBox(transform)
self.size_bar.add_artist(Rectangle((0, 0), size, size_vertical,
fill=fill_bar, facecolor=color,
edgecolor=color))
if fontproperties is None and 'prop' in kwargs:
fontproperties = kwargs.pop('prop')
if fontproperties is None:
textprops = {'color': color}
else:
textprops = {'color': color, 'fontproperties': fontproperties}
self.txt_label = TextArea(
label,
minimumdescent=False,
textprops=textprops)
if label_top:
_box_children = [self.txt_label, self.size_bar]
else:
_box_children = [self.size_bar, self.txt_label]
self._box = VPacker(children=_box_children,
align="center",
pad=0, sep=sep)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=self._box,
prop=fontproperties,
frameon=frameon, **kwargs)
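# Editor's note: a minimal, hypothetical demo of the classes defined above; it
# is not part of the original module and only runs when executed directly.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    # anchored ellipse of 0.2 x 0.1 data units in the lower left (loc=3)
    ax.add_artist(AnchoredEllipse(ax.transData, width=0.2, height=0.1,
                                  angle=30, loc=3, frameon=False))
    # size bar of 0.25 data units in the lower right (loc=4)
    ax.add_artist(AnchoredSizeBar(ax.transData, 0.25, '0.25 units', 4,
                                  size_vertical=0.01, color='black'))
    plt.show()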
|
mit
|
mkukielka/oddt
|
oddt/spatial.py
|
2
|
9648
|
"""Spatial functions included in ODDT
Mainly used by other modules, but can be accessed directly.
"""
from math import sin, cos
import numpy as np
from scipy.spatial.distance import cdist
# for Hungarian algorithm, in future use scipy.optimize.linear_sum_assignment (in scipy 0.17+)
try:
from scipy.optimize import linear_sum_assignment
except ImportError:
from sklearn.utils.linear_assignment_ import linear_assignment
def linear_sum_assignment(M):
out = linear_assignment(M)
return out[:, 0], out[:, 1]
import oddt
from oddt.utils import is_openbabel_molecule
__all__ = ['angle',
'angle_2v',
'dihedral',
'distance',
'rmsd',
'rotate']
def angle(p1, p2, p3):
"""Returns an angle from a series of 3 points (point #2 is centroid).
Angle is returned in degrees.
Parameters
----------
p1,p2,p3 : numpy arrays, shape = [n_points, n_dimensions]
Triplets of points in n-dimensional space, aligned in rows.
Returns
-------
angles : numpy array, shape = [n_points]
Series of angles in degrees
"""
v1 = p1 - p2
v2 = p3 - p2
return angle_2v(v1, v2)
def angle_2v(v1, v2):
"""Returns an angle between two vecors.Angle is returned in degrees.
Parameters
----------
v1,v2 : numpy arrays, shape = [n_vectors, n_dimensions]
Pairs of vectors in n-dimensional space, aligned in rows.
Returns
-------
angles : numpy array, shape = [n_vectors]
Series of angles in degrees
"""
# better than np.dot(v1, v2), multiple vectors can be applied
dot = (v1 * v2).sum(axis=-1)
norm = np.linalg.norm(v1, axis=-1) * np.linalg.norm(v2, axis=-1)
return np.degrees(np.arccos(np.clip(dot/norm, -1, 1)))
def dihedral(p1, p2, p3, p4):
"""Returns an dihedral angle from a series of 4 points.
Dihedral is returned in degrees.
Function distingishes clockwise and antyclockwise dihedrals.
Parameters
----------
p1, p2, p3, p4 : numpy arrays, shape = [n_points, n_dimensions]
Quadruplets of points in n-dimensional space, aligned in rows.
Returns
-------
angles : numpy array, shape = [n_points]
Series of angles in degrees
"""
v12 = (p1 - p2)/np.linalg.norm(p1 - p2)
v23 = (p2 - p3)/np.linalg.norm(p2 - p3)
v34 = (p3 - p4)/np.linalg.norm(p3 - p4)
c1 = np.cross(v12, v23)
c2 = np.cross(v23, v34)
out = angle_2v(c1, c2)
# check clockwise and anticlockwise
n1 = c1 / np.linalg.norm(c1)
mask = (n1 * v34).sum(axis=-1) > 0
if len(mask.shape) == 0:
if mask:
out = -out
else:
out[mask] = -out[mask]
return out
def rmsd(ref, mol, ignore_h=True, method=None, normalize=False):
"""Computes root mean square deviation (RMSD) between two molecules
    (including or excluding hydrogens). No symmetry checks are performed unless
    method='min_symmetry' is used.
Parameters
----------
ref : oddt.toolkit.Molecule object
Reference molecule for the RMSD calculation
mol : oddt.toolkit.Molecule object
Query molecule for RMSD calculation
    ignore_h : bool (default=True)
        Flag indicating whether to ignore hydrogen atoms while performing the
        RMSD calculation. This toggle works only with the 'hungarian' method
        and without sorting (method=None).
method : str (default=None)
        The method to be used for atom assignment between ref and mol.
None means that direct matching is applied, which is the default
behavior.
Available methods:
- canonize - match heavy atoms using canonical ordering (it forces
ignoring H's)
- hungarian - minimize RMSD using Hungarian algorithm
- min_symmetry - makes multiple molecule-molecule matches and finds
minimal RMSD (the slowest). Hydrogens are ignored.
normalize : bool (default=False)
        Normalize the RMSD by the square root of the number of rotatable bonds
Returns
-------
rmsd : float
RMSD between two molecules
"""
if method == 'canonize':
ref_atoms = ref.coords[ref.canonic_order]
mol_atoms = mol.coords[mol.canonic_order]
elif method == 'hungarian':
mol_map = []
ref_map = []
for a_type in np.unique(mol.atom_dict['atomtype']):
if a_type != 'H' or not ignore_h:
mol_idx = np.argwhere(mol.atom_dict['atomtype'] == a_type).flatten()
ref_idx = np.argwhere(ref.atom_dict['atomtype'] == a_type).flatten()
if len(mol_idx) != len(ref_idx):
raise ValueError('Unequal number of atoms type: %s' % a_type)
if len(mol_idx) == 1:
mol_map.append(mol_idx)
ref_map.append(ref_idx)
continue
M = distance(mol.atom_dict['coords'][mol_idx],
ref.atom_dict['coords'][ref_idx])
M = M - M.min(axis=0) - M.min(axis=1).reshape(-1, 1)
tmp_mol, tmp_ref = linear_sum_assignment(M)
mol_map.append(mol_idx[tmp_mol])
ref_map.append(ref_idx[tmp_ref])
mol_atoms = mol.atom_dict['coords'][np.hstack(mol_map)]
ref_atoms = ref.atom_dict['coords'][np.hstack(ref_map)]
elif method == 'min_symmetry':
min_rmsd = None
ref_atoms = ref.atom_dict[ref.atom_dict['atomicnum'] != 1]['coords']
mol_atoms = mol.atom_dict[mol.atom_dict['atomicnum'] != 1]['coords']
        # safety switch to check that the numbers of heavy atoms match
if ref_atoms.shape == mol_atoms.shape:
# match mol to ref, generate all matches to find best RMSD
matches = oddt.toolkit.Smarts(ref).findall(mol, unique=False)
if not matches:
raise ValueError('Could not find any match between molecules.')
# calculate RMSD between all matches and retain the smallest
for match in matches:
match = np.array(match, dtype=int)
if is_openbabel_molecule(mol):
match -= 1 # OB has 1-based indices
tmp_dict = mol.atom_dict[match]
mol_atoms = tmp_dict[tmp_dict['atomicnum'] != 1]['coords']
# following should not happen, although safety check is left
if mol_atoms.shape != ref_atoms.shape:
raise Exception('Molecular match got wrong number of atoms.')
rmsd = np.sqrt(((mol_atoms - ref_atoms)**2).sum(axis=-1).mean())
if min_rmsd is None or rmsd < min_rmsd:
min_rmsd = rmsd
return min_rmsd
elif ignore_h:
mol_atoms = mol.coords[mol.atom_dict['atomicnum'] != 1]
ref_atoms = ref.coords[ref.atom_dict['atomicnum'] != 1]
else:
mol_atoms = mol.coords
ref_atoms = ref.coords
if mol_atoms.shape == ref_atoms.shape:
rmsd = np.sqrt(((mol_atoms - ref_atoms)**2).sum(axis=-1).mean())
if normalize:
rmsd /= np.sqrt(mol.num_rotors)
return rmsd
# at this point raise an exception
raise ValueError('Unequal number of atoms in molecules (%i and %i)'
% (len(mol_atoms), len(ref_atoms)))
def distance(x, y):
"""Computes distance between each pair of points from x and y.
Parameters
----------
x : numpy arrays, shape = [n_x, 3]
        Array of points in 3D
y : numpy arrays, shape = [n_y, 3]
        Array of points in 3D
Returns
-------
dist_matrix : numpy arrays, shape = [n_x, n_y]
Distance matrix
"""
return cdist(x, y)
def distance_complex(x, y):
""" Computes distance between points, similar to distance(cdist),
with major difference - allows higher dimmentions of input (cdist supports 2).
distance is purely float64 and can de slightly more precise.
Parameters
----------
x : numpy arrays, shape = [..., 3]
        Array of points in 3D
y : numpy arrays, shape = [..., 3]
        Array of points in 3D
Returns
-------
dist_matrix : numpy arrays
Distance matrix
"""
return np.linalg.norm(x[..., np.newaxis, :] - y, axis=-1)
def rotate(coords, alpha, beta, gamma):
"""Rotate coords by cerain angle in X, Y, Z. Angles are specified in radians.
Parameters
----------
coords : numpy arrays, shape = [n_points, 3]
Coordinates in 3-dimensional space.
alpha, beta, gamma: float
Angles to rotate the coordinates along X, Y and Z axis.
Angles are specified in radians.
Returns
-------
new_coords : numpy arrays, shape = [n_points, 3]
        Rotated coordinates in 3-dimensional space.
"""
centroid = coords.mean(axis=0)
coords = coords - centroid
sin_alpha = sin(alpha)
cos_alpha = cos(alpha)
sin_beta = sin(beta)
cos_beta = cos(beta)
sin_gamma = sin(gamma)
cos_gamma = cos(gamma)
rot_matrix = np.array([[cos_beta * cos_gamma,
sin_alpha * sin_beta * cos_gamma - cos_alpha * sin_gamma,
cos_alpha * sin_beta * cos_gamma + sin_alpha * sin_gamma],
[cos_beta * sin_gamma,
sin_alpha * sin_beta * sin_gamma + cos_alpha * cos_gamma,
cos_alpha * sin_beta * sin_gamma - sin_alpha * cos_gamma],
[-sin_beta,
sin_alpha * cos_beta,
cos_alpha * cos_beta]])
return (coords[:, np.newaxis, :] * rot_matrix).sum(axis=2) + centroid
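# Editor's note: a small, hypothetical sanity-check block exercising the pure
# geometry helpers above; it is not part of the original module and only runs
# when executed directly.
if __name__ == '__main__':
    p1 = np.array([[1., 0., 0.]])
    p2 = np.array([[0., 0., 0.]])
    p3 = np.array([[0., 1., 0.]])
    print(angle(p1, p2, p3))   # ~[90.]
    print(angle_2v(p1, p3))    # ~[90.]
    print(distance(p1, p3))    # [[sqrt(2)]] ~ [[1.414]]
    # rotate two points 90 degrees around Z (rotation is about their centroid)
    coords = np.array([[1., 0., 0.], [-1., 0., 0.]])
    print(rotate(coords, 0.0, 0.0, np.pi / 2))  # ~[[0, 1, 0], [0, -1, 0]]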
|
bsd-3-clause
|
jrleeman/MetPy
|
metpy/tests/test_units.py
|
1
|
8001
|
# Copyright (c) 2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
r"""Tests the operation of MetPy's unit support code."""
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
from metpy.testing import assert_array_almost_equal, assert_array_equal
from metpy.testing import set_agg_backend # noqa: F401
from metpy.units import (atleast_1d, atleast_2d, check_units, concatenate, diff,
pandas_dataframe_to_unit_arrays, units)
def test_concatenate():
"""Test basic functionality of unit-aware concatenate."""
result = concatenate((3 * units.meter, 400 * units.cm))
assert_array_equal(result, np.array([3, 4]) * units.meter)
assert not isinstance(result.m, np.ma.MaskedArray)
def test_concatenate_masked():
"""Test concatenate preserves masks."""
d1 = units.Quantity(np.ma.array([1, 2, 3], mask=[False, True, False]), 'degC')
result = concatenate((d1, 32 * units.degF))
truth = np.ma.array([1, np.inf, 3, 0])
truth[1] = np.ma.masked
assert_array_almost_equal(result, units.Quantity(truth, 'degC'), 6)
assert_array_equal(result.mask, np.array([False, True, False, False]))
@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True)
def test_axhline():
r"""Ensure that passing a quantity to axhline does not error."""
fig, ax = plt.subplots()
ax.axhline(930 * units('mbar'))
ax.set_ylim(900, 950)
ax.set_ylabel('')
return fig
@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True)
def test_axvline():
r"""Ensure that passing a quantity to axvline does not error."""
fig, ax = plt.subplots()
ax.axvline(0 * units('degC'))
ax.set_xlim(-1, 1)
ax.set_xlabel('')
return fig
def test_atleast1d_without_units():
"""Test that atleast_1d wrapper can handle plain arrays."""
assert_array_equal(atleast_1d(1), np.array([1]))
assert_array_equal(atleast_1d([1, ], [2, ]), np.array([[1, ], [2, ]]))
def test_atleast2d_without_units():
"""Test that atleast_2d wrapper can handle plain arrays."""
assert_array_equal(atleast_2d(1), np.array([[1]]))
def test_atleast2d_with_units():
"""Test that atleast_2d wrapper can handle plain array with units."""
assert_array_equal(
atleast_2d(1 * units.degC), np.array([[1]]) * units.degC)
def test_units_diff():
"""Test our diff handles units properly."""
assert_array_equal(diff(np.arange(20, 22) * units.degC),
np.array([1]) * units.delta_degC)
#
# Tests for unit-checking decorator
#
def unit_calc(temp, press, dens, mixing, unitless_const):
r"""Stub calculation for testing unit checking."""
pass
test_funcs = [
check_units('[temperature]', '[pressure]', dens='[mass]/[volume]',
mixing='[dimensionless]')(unit_calc),
check_units(temp='[temperature]', press='[pressure]', dens='[mass]/[volume]',
mixing='[dimensionless]')(unit_calc),
check_units('[temperature]', '[pressure]', '[mass]/[volume]',
'[dimensionless]')(unit_calc)]
@pytest.mark.parametrize('func', test_funcs, ids=['some kwargs', 'all kwargs', 'all pos'])
def test_good_units(func):
r"""Test that unit checking passes good units regardless."""
func(30 * units.degC, 1000 * units.mbar, 1.0 * units('kg/m^3'), 1, 5.)
test_params = [((30 * units.degC, 1000 * units.mb, 1 * units('kg/m^3'), 1, 5 * units('J/kg')),
{}, [('press', '[pressure]', 'millibarn')]),
((30, 1000, 1.0, 1, 5.), {}, [('press', '[pressure]', 'none'),
('temp', '[temperature]', 'none'),
('dens', '[mass]/[volume]', 'none')]),
((30, 1000 * units.mbar),
{'dens': 1.0 * units('kg / m'), 'mixing': 5 * units.m, 'unitless_const': 2},
[('temp', '[temperature]', 'none'),
('dens', '[mass]/[volume]', 'kilogram / meter'),
('mixing', '[dimensionless]', 'meter')])]
@pytest.mark.skipif(sys.version_info < (3, 3), reason='Unit checking requires Python >= 3.3')
@pytest.mark.parametrize('func', test_funcs, ids=['some kwargs', 'all kwargs', 'all pos'])
@pytest.mark.parametrize('args,kwargs,bad_parts', test_params,
ids=['one bad arg', 'all args no units', 'mixed args'])
def test_bad(func, args, kwargs, bad_parts):
r"""Test that unit checking flags appropriate arguments."""
with pytest.raises(ValueError) as exc:
func(*args, **kwargs)
message = str(exc.value)
assert func.__name__ in message
for param in bad_parts:
assert '`{}` requires "{}" but given "{}"'.format(*param) in message
# Should never complain about the const argument
assert 'unitless_const' not in message
def test_pandas_units_simple():
"""Simple unit attachment to two columns."""
df = pd.DataFrame(data=[[1, 4], [2, 5], [3, 6]], columns=['cola', 'colb'])
df_units = {'cola': 'kilometers', 'colb': 'degC'}
res = pandas_dataframe_to_unit_arrays(df, column_units=df_units)
cola_truth = np.array([1, 2, 3]) * units.km
colb_truth = np.array([4, 5, 6]) * units.degC
assert_array_equal(res['cola'], cola_truth)
assert_array_equal(res['colb'], colb_truth)
@pytest.mark.filterwarnings('ignore:Pandas doesn\'t allow columns to be created')
def test_pandas_units_on_dataframe():
"""Unit attachment based on a units attribute to a dataframe."""
df = pd.DataFrame(data=[[1, 4], [2, 5], [3, 6]], columns=['cola', 'colb'])
df.units = {'cola': 'kilometers', 'colb': 'degC'}
res = pandas_dataframe_to_unit_arrays(df)
cola_truth = np.array([1, 2, 3]) * units.km
colb_truth = np.array([4, 5, 6]) * units.degC
assert_array_equal(res['cola'], cola_truth)
assert_array_equal(res['colb'], colb_truth)
@pytest.mark.filterwarnings('ignore:Pandas doesn\'t allow columns to be created')
def test_pandas_units_on_dataframe_not_all_united():
"""Unit attachment with units attribute with a column with no units."""
df = pd.DataFrame(data=[[1, 4], [2, 5], [3, 6]], columns=['cola', 'colb'])
df.units = {'cola': 'kilometers'}
res = pandas_dataframe_to_unit_arrays(df)
cola_truth = np.array([1, 2, 3]) * units.km
colb_truth = np.array([4, 5, 6])
assert_array_equal(res['cola'], cola_truth)
assert_array_equal(res['colb'], colb_truth)
def test_pandas_units_no_units_given():
"""Ensure unit attachment fails if no unit information is given."""
df = pd.DataFrame(data=[[1, 4], [2, 5], [3, 6]], columns=['cola', 'colb'])
with pytest.raises(ValueError):
pandas_dataframe_to_unit_arrays(df)
def test_added_degrees_units():
"""Test that our added degrees units are present in the registry."""
# Test equivalence of abbreviations/aliases to our defined names
assert str(units('degrees_N').units) == 'degrees_north'
assert str(units('degreesN').units) == 'degrees_north'
assert str(units('degree_north').units) == 'degrees_north'
assert str(units('degree_N').units) == 'degrees_north'
assert str(units('degreeN').units) == 'degrees_north'
assert str(units('degrees_E').units) == 'degrees_east'
assert str(units('degreesE').units) == 'degrees_east'
assert str(units('degree_east').units) == 'degrees_east'
assert str(units('degree_E').units) == 'degrees_east'
assert str(units('degreeE').units) == 'degrees_east'
# Test equivalence of our defined units to base units
assert units('degrees_north') == units('degrees')
assert units('degrees_north').to_base_units().units == units.radian
assert units('degrees_east') == units('degrees')
assert units('degrees_east').to_base_units().units == units.radian
def test_gpm_unit():
"""Test that the gpm unit does alias to meters."""
x = 1 * units('gpm')
assert str(x.units) == 'meter'
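# Editor's note: a hedged sketch of the `check_units` decorator pattern that
# the tests above exercise (the decorated function body is a placeholder):
#
#   >>> @check_units('[temperature]', '[pressure]')
#   ... def some_calc(temp, press):
#   ...     pass
#   >>> some_calc(30 * units.degC, 1000 * units.mbar)   # passes the check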
|
bsd-3-clause
|
fyabc/MiniGames
|
HearthStone2/test/cards_statistic.py
|
1
|
5383
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
from collections import defaultdict
import glob
import sys
import re
import numpy as np
import matplotlib.pyplot as plt
__author__ = 'fyabc'
def _get_int(v):
if v is None:
return None
return int(v)
def _auto_label(rects):
ratios = [rect.get_height() for rect in rects]
_sum_height = sum(ratios)
ratios = [ratio / _sum_height for ratio in ratios]
for rect, ratio in zip(rects, ratios):
height = rect.get_height()
plt.text(rect.get_x() + rect.get_width() * 0.5, 1.02 * height, '{}, {:.1f}%'.format(int(height), 100 * ratio),
horizontalalignment='center', verticalalignment='bottom')
class CardRecord:
Pattern = re.compile(r'\d+\. (?P<name>\S+) (?P<class>\w+) (?P<type>\w+) (?P<rarity>\w+)(?: (?P<race>\w+))? '
r'(?P<cost>\d+)(?: (?P<attack>\d+) (?P<health>\d+))? - ?(?P<description>.*?)(?://.*)?$')
AllCards = []
Classes = {
'中立': 0,
'德鲁伊': 1, '法师': 2, '圣骑士': 3,
'猎人': 4, '术士': 5, '战士': 6,
'潜行者': 7, '牧师': 8, '萨满': 9,
'武僧': 10, '死亡骑士': 11,
}
Rarities = {
'基本': 0, '普通': 1, '稀有': 2,
'史诗': 3, '传说': 4, '衍生物': 5,
}
Types = {
'随从': 0, '法术': 1, '武器': 2, '英雄': 3,
}
def __init__(self, match):
self.AllCards.append(self)
self.name = match.group('name')
self.klass = match.group('class')
self.type = match.group('type')
self.rarity = match.group('rarity')
self.race = match.group('race')
self.cost = _get_int(match.group('cost'))
self.attack = _get_int(match.group('attack'))
self.health = _get_int(match.group('health'))
self.description = match.group('description')
def __repr__(self):
return 'Card(name={name}, class={klass}, type={type}, rarity={rarity}, race={race}, ' \
'CAH=[{cost}, {attack}, {health}], description={description})'.format_map(vars(self))
@classmethod
def print_all(cls):
print('{} cards total'.format(len(cls.AllCards)))
for card in cls.AllCards:
print(card)
@classmethod
def print_brief(cls):
print('{} cards total'.format(len(cls.AllCards)))
@classmethod
def sort_key(cls, field):
if field == 'klass':
return lambda k: cls.Classes.get(k, -1)
if field == 'type':
return lambda k: cls.Types.get(k, -1)
if field == 'rarity':
return lambda k: cls.Rarities.get(k, -1)
return lambda k: -1 if k is None else k
@classmethod
def plot_over(cls, fields=('cost',), show_none=False):
if isinstance(show_none, bool):
show_none = [show_none for _ in range(len(fields))]
assert len(fields) == len(show_none)
        plt.rcParams['font.sans-serif'] = ['SimHei']  # needed to display Chinese labels correctly
        plt.rcParams['axes.unicode_minus'] = False  # needed to display minus signs correctly
n_figures = len(fields)
bar_width = 0.5
colors = ['b', 'r', 'g', 'k']
for fig_i, (field, sn) in enumerate(zip(fields, show_none)):
# Count.
all_values = defaultdict(set)
for card in cls.AllCards:
if not hasattr(card, field):
continue
all_values[getattr(card, field)].add(card)
if not sn:
if None in all_values:
del all_values[None]
# Plot.
plt.subplot(2, (n_figures + 1) // 2, fig_i + 1)
index = np.arange(len(all_values))
keys = sorted(list(all_values.keys()), key=cls.sort_key(field))
values = [len(all_values[key]) for key in keys]
rects = plt.bar(
index, values,
bar_width,
color=colors[fig_i % len(colors)],
tick_label=keys,
label=field,
)
_auto_label(rects)
plt.xticks(index)
plt.legend()
plt.grid(axis='y', linestyle='--')
plt.tight_layout()
plt.show()
def main(args=None):
# Usage: python cards_statistic.py [pattern of package Markdown files]
if args is None:
args = sys.argv[1:]
patterns = args
for pattern in patterns:
for filename in glob.glob(pattern):
with open(filename, 'r', encoding='utf-8') as f:
for line in f:
match = CardRecord.Pattern.match(line)
if match:
CardRecord(match)
CardRecord.print_brief()
CardRecord.plot_over(['cost', 'attack', 'health', 'klass', 'rarity', 'type'], True)
if __name__ == '__main__':
# packages = [
# '../doc/official/BasicClassic.md',
# '../doc/official/Naxxramas.md',
# '../doc/official/GVG.md',
# '../doc/official/BlackMountain.md',
# '../doc/official/TGT.md',
# ]
packages = [
'../doc/diy/MyExtension.md',
'../doc/diy/MyExtension2.md',
'../doc/diy/MyExtension3.md',
'../doc/diy/MyAdventure.md',
'../doc/diy/MyAdventure2.md',
'../doc/diy/MonkAdventure.md',
]
main(packages)
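# Editor's note: a hypothetical command-line invocation, following the usage
# note in main() above (the glob pattern is only an example):
#
#   python cards_statistic.py "../doc/diy/*.md"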
|
mit
|
biocore/qiime2
|
qiime2/metadata/io.py
|
1
|
17428
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import csv
import itertools
import os.path
import re
import numpy as np
import pandas as pd
from qiime2.core.util import find_duplicates
from .base import SUPPORTED_COLUMN_TYPES, FORMATTED_ID_HEADERS, is_id_header
from .metadata import Metadata, MetadataColumn
class MetadataFileError(Exception):
_suffix = (
"There may be more errors present in the metadata file. To get a full "
"report, sample/feature metadata files can be validated with Keemei: "
"https://keemei.qiime2.org\n\nFind details on QIIME 2 metadata "
"requirements here: https://docs.qiime2.org/%s/tutorials/metadata/")
def __init__(self, message, include_suffix=True):
# Lazy import because `qiime2.__release__` is available at runtime but
# not at import time (otherwise the release value could be interpolated
# into `_suffix` in the class definition above).
import qiime2
if include_suffix:
message = message + '\n\n' + self._suffix % qiime2.__release__
super().__init__(message)
class MetadataReader:
def __init__(self, filepath):
if not os.path.isfile(filepath):
raise MetadataFileError(
"Metadata file path doesn't exist, or the path points to "
"something other than a file. Please check that the path "
"exists, has read permissions, and points to a regular file "
"(not a directory): %s" % filepath)
self._filepath = filepath
# Used by `read()` to store an iterator yielding rows with
# leading/trailing whitespace stripped from their cells (this is a
# preprocessing step that should happen with *every* row). The iterator
# protocol is the only guaranteed API on this object.
self._reader = None
def read(self, into, column_types=None):
if column_types is None:
column_types = {}
try:
# Newline settings based on recommendation from csv docs:
# https://docs.python.org/3/library/csv.html#id3
# Ignore BOM on read (but do not write BOM)
with open(self._filepath,
'r', newline='', encoding='utf-8-sig') as fh:
tsv_reader = csv.reader(fh, dialect='excel-tab', strict=True)
self._reader = (self._strip_cell_whitespace(row)
for row in tsv_reader)
header = self._read_header()
directives = self._read_directives(header)
ids, data = self._read_data(header)
except UnicodeDecodeError as e:
if ('0xff in position 0' in str(e)
or '0xfe in position 0' in str(e)):
raise MetadataFileError(
"Metadata file must be encoded as UTF-8 or ASCII, found "
"UTF-16. If this file is from Microsoft Excel, save "
"as a plain text file, not 'UTF-16 Unicode'")
raise MetadataFileError(
"Metadata file must be encoded as UTF-8 or ASCII. The "
"following error occurred when decoding the file:\n\n%s" % e)
finally:
self._reader = None
index = pd.Index(ids, name=header[0], dtype=object)
df = pd.DataFrame(data, columns=header[1:], index=index, dtype=object)
for name, type in column_types.items():
if name not in df.columns:
raise MetadataFileError(
"Column name %r specified in `column_types` is not a "
"column in the metadata file." % name)
if type not in SUPPORTED_COLUMN_TYPES:
fmt_column_types = ', '.join(
repr(e) for e in sorted(SUPPORTED_COLUMN_TYPES))
raise MetadataFileError(
"Column name %r specified in `column_types` has an "
"unrecognized column type %r. Supported column types: %s" %
(name, type, fmt_column_types))
resolved_column_types = directives.get('types', {})
resolved_column_types.update(column_types)
try:
# Cast each column to the appropriate dtype based on column type.
df = df.apply(self._cast_column, axis='index',
column_types=resolved_column_types)
except MetadataFileError as e:
# HACK: If an exception is raised within `DataFrame.apply`, pandas
# adds an extra tuple element to `e.args`, making the original
# error message difficult to read because a tuple is repr'd instead
# of a string. To work around this, we catch and reraise a
# MetadataFileError with the original error message. We use
# `include_suffix=False` to avoid adding another suffix to the
# error message we're reraising.
msg = e.args[0]
raise MetadataFileError(msg, include_suffix=False)
try:
return into(df)
except Exception as e:
raise MetadataFileError(
"There was an issue with loading the metadata file:\n\n%s" % e)
def _read_header(self):
header = None
for row in self._reader:
if self._is_header(row):
header = row
break
elif self._is_comment(row):
continue
elif self._is_empty(row):
continue
elif self._is_directive(row):
raise MetadataFileError(
"Found directive %r while searching for header. "
"Directives may only appear immediately after the header."
% row[0])
else:
raise MetadataFileError(
"Found unrecognized ID column name %r while searching for "
"header. The first column name in the header defines the "
"ID column, and must be one of these values:\n\n%s" %
(row[0], FORMATTED_ID_HEADERS))
if header is None:
raise MetadataFileError(
"Failed to locate header. The metadata file may be empty, or "
"consists only of comments or empty rows.")
# Trim trailing empty cells from header.
data_extent = None
for idx, cell in enumerate(header):
if cell != '':
data_extent = idx
header = header[:data_extent+1]
# Basic validation to 1) fail early before processing entire file; and
# 2) make some basic guarantees about the header for things in this
# class that use the header as part of reading the file.
column_names = set(header)
if '' in column_names:
raise MetadataFileError(
"Found at least one column without a name in the header. Each "
"column must be named.")
elif len(header) != len(column_names):
duplicates = find_duplicates(header)
raise MetadataFileError(
"Column names must be unique. The following column names are "
"duplicated: %s" %
(', '.join(repr(e) for e in sorted(duplicates))))
# Skip the first element of the header because we know it is a valid ID
# header. The other column names are validated to ensure they *aren't*
# valid ID headers.
for column_name in header[1:]:
if is_id_header(column_name):
raise MetadataFileError(
"Metadata column name %r conflicts with a name reserved "
"for the ID column header. Reserved ID column headers:"
"\n\n%s" % (column_name, FORMATTED_ID_HEADERS))
return header
def _read_directives(self, header):
directives = {}
for row in self._reader:
if not self._is_directive(row):
self._reader = itertools.chain([row], self._reader)
break
if not self._is_column_types_directive(row):
raise MetadataFileError(
"Unrecognized directive %r. Only the #q2:types "
"directive is supported at this time." % row[0])
if 'types' in directives:
raise MetadataFileError(
"Found duplicate directive %r. Each directive may "
"only be specified a single time." % row[0])
row = self._match_header_len(row, header)
column_types = {}
for column_name, column_type in zip(header[1:], row[1:]):
if column_type:
type_nocase = column_type.lower()
if type_nocase in SUPPORTED_COLUMN_TYPES:
column_types[column_name] = type_nocase
else:
fmt_column_types = ', '.join(
repr(e) for e in sorted(SUPPORTED_COLUMN_TYPES))
raise MetadataFileError(
"Column %r has an unrecognized column type %r "
"specified in its #q2:types directive. "
"Supported column types (case-insensitive): %s"
% (column_name, column_type, fmt_column_types))
directives['types'] = column_types
return directives
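    # Editor's note: a hedged sketch (not from the original file) of the
    # directives section this method parses; <ID> stands for any recognized
    # ID column header, columns are tab-separated, and the type names come
    # from the 'numeric'/'categorical' handling in _cast_column below:
    #
    #   <ID>        col1          col2
    #   #q2:types   categorical   numeric
    #   sample-1    a             1.5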
def _read_data(self, header):
ids = []
data = []
for row in self._reader:
if self._is_comment(row):
continue
elif self._is_empty(row):
continue
elif self._is_directive(row):
raise MetadataFileError(
"Found directive %r outside of the directives section of "
"the file. Directives may only appear immediately after "
"the header." % row[0])
elif self._is_header(row):
raise MetadataFileError(
"Metadata ID %r conflicts with a name reserved for the ID "
"column header. Reserved ID column headers:\n\n%s" %
(row[0], FORMATTED_ID_HEADERS))
row = self._match_header_len(row, header)
ids.append(row[0])
data.append(row[1:])
return ids, data
def _strip_cell_whitespace(self, row):
return [cell.strip() for cell in row]
def _match_header_len(self, row, header):
row_len = len(row)
header_len = len(header)
if row_len < header_len:
# Pad row with empty cells to match header length.
row = row + [''] * (header_len - row_len)
elif row_len > header_len:
trailing_row = row[header_len:]
if not self._is_empty(trailing_row):
raise MetadataFileError(
"Metadata row contains more cells than are declared by "
"the header. The row has %d cells, while the header "
"declares %d cells." % (row_len, header_len))
row = row[:header_len]
return row
def _is_empty(self, row):
# `all` returns True for an empty iterable, so this check works for a
# row of zero elements (corresponds to a blank line in the file).
return all((cell == '' for cell in row))
def _is_comment(self, row):
return (
len(row) > 0 and
row[0].startswith('#') and
not self._is_directive(row) and
not self._is_header(row)
)
def _is_header(self, row):
if len(row) == 0:
return False
return is_id_header(row[0])
def _is_directive(self, row):
return len(row) > 0 and row[0].startswith('#q2:')
def _is_column_types_directive(self, row):
return len(row) > 0 and row[0] == '#q2:types'
def _cast_column(self, series, column_types):
if series.name in column_types:
if column_types[series.name] == 'numeric':
return self._to_numeric(series)
else: # 'categorical'
return self._to_categorical(series)
else:
# Infer type
try:
return self._to_numeric(series)
except MetadataFileError:
return self._to_categorical(series)
def _to_categorical(self, series):
# Replace empty strings with `None` to force the series to remain
# dtype=object (this only matters if the series consists solely of
# missing data). Replacing with np.nan and casting to dtype=object
# won't retain the correct dtype in the resulting dataframe
# (`DataFrame.apply` seems to force series consisting solely of np.nan
# to dtype=float64, even if dtype=object is specified).
#
# To replace a value with `None`, the following invocation of
# `Series.replace` must be used because `None` is a sentinel:
# https://stackoverflow.com/a/17097397/3776794
return series.replace([''], [None])
def _to_numeric(self, series):
series = series.replace('', np.nan)
is_numeric = series.apply(self._is_numeric)
if is_numeric.all():
return pd.to_numeric(series, errors='raise')
else:
non_numerics = series[~is_numeric].unique()
raise MetadataFileError(
"Cannot convert metadata column %r to numeric. The following "
"values could not be interpreted as numeric: %s" %
(series.name,
', '.join(repr(e) for e in sorted(non_numerics))))
def _is_numeric(self, value):
return (isinstance(value, float) or
len(_numeric_regex.findall(value)) == 1)
class MetadataWriter:
def __init__(self, metadata):
self._metadata = metadata
def write(self, filepath):
# Newline settings based on recommendation from csv docs:
# https://docs.python.org/3/library/csv.html#id3
# Do NOT write a BOM, hence utf-8 not utf-8-sig
with open(filepath, 'w', newline='', encoding='utf-8') as fh:
tsv_writer = csv.writer(fh, dialect='excel-tab', strict=True)
md = self._metadata
header = [md.id_header]
types_directive = ['#q2:types']
if isinstance(md, Metadata):
for name, props in md.columns.items():
header.append(name)
types_directive.append(props.type)
elif isinstance(md, MetadataColumn):
header.append(md.name)
types_directive.append(md.type)
else:
raise NotImplementedError
tsv_writer.writerow(header)
tsv_writer.writerow(types_directive)
df = md.to_dataframe()
df.fillna('', inplace=True)
df = df.applymap(self._format)
tsv_writer.writerows(df.itertuples(index=True))
def _format(self, value):
if isinstance(value, str):
return value
elif isinstance(value, float):
# Use fixed precision or scientific notation as necessary (both are
# roundtrippable in the metadata file format), with up to 15 digits
# *total* precision (i.e. before and after the decimal point),
# rounding if necessary. Trailing zeros or decimal points will not
# be included in the formatted string (e.g. 42.0 will be formatted
# as "42"). A precision of 15 digits is used because that is within
# the 64-bit floating point spec (things get weird after that).
#
# Using repr() and str() each have their own predefined precision
# which varies across Python versions. Using the string formatting
# presentation types (e.g. %g, %f) without specifying a precision
# will usually default to 6 digits past the decimal point, which
# seems a little low.
#
# References:
#
# - https://stackoverflow.com/a/2440786/3776794
# - https://stackoverflow.com/a/2440708/3776794
# - https://docs.python.org/3/library/string.html#
# format-specification-mini-language
# - https://stackoverflow.com/a/20586479/3776794
# - https://drj11.wordpress.com/2007/07/03/python-poor-printing-
# of-floating-point/
return '{0:.15g}'.format(value)
else:
raise NotImplementedError
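# --- Hedged illustration (not part of the original module) -----------------
# The comment in MetadataWriter._format above explains the choice of the
# '{0:.15g}' format: up to 15 significant digits, fixed or scientific
# notation as needed, and no trailing zeros or decimal point. The sketch
# below only demonstrates that behaviour on a few assumed values; the helper
# name `_demo_float_formatting` is hypothetical and unused elsewhere.
def _demo_float_formatting():
    examples = [42.0, 0.1 + 0.2, 1e-7, 123456789012345.678]
    formatted = ['{0:.15g}'.format(v) for v in examples]
    # e.g. 42.0 -> '42', 0.1 + 0.2 -> '0.3', 1e-07 stays in scientific form
    assert formatted[0] == '42'
    assert formatted[1] == '0.3'
    return formatted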
# Credit: https://stackoverflow.com/a/4703508/3776794
_numeric_pattern = r"""
^[-+]? # optional sign
(?:
(?: \d* \. \d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc
|
(?: \d+ \.? ) # 1. 12. 123. etc 1 12 123 etc
)
# followed by optional exponent part if desired
(?: [Ee] [+-]? \d+ ) ?$
"""
_numeric_regex = re.compile(_numeric_pattern, re.VERBOSE)
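# --- Hedged illustration (not part of the original module) -----------------
# A quick check of which cell values the verbose pattern above accepts,
# mirroring the `len(findall(...)) == 1` test used in _is_numeric. The
# sample values and the helper name `_demo_numeric_regex` are assumptions
# for illustration only.
def _demo_numeric_regex():
    accepted = ['42', '-3.5', '.5', '1e-3', '+7.', '2.5E+10']
    rejected = ['', 'abc', '1.2.3', '1e', '--1']
    assert all(len(_numeric_regex.findall(v)) == 1 for v in accepted)
    assert all(len(_numeric_regex.findall(v)) != 1 for v in rejected)
    return accepted, rejected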
|
bsd-3-clause
|
michigraber/scikit-learn
|
sklearn/metrics/regression.py
|
175
|
16953
|
"""Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__all__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
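# --- Hedged illustration (not part of scikit-learn) -------------------------
# A minimal sketch of the contract described in the docstring above: 1-D
# targets are reshaped to column vectors and the task is reported as
# 'continuous'. The helper name `_example_check_reg_targets` is hypothetical.
def _example_check_reg_targets():
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        [3, -0.5, 2, 7], [2.5, 0.0, 2, 8], 'uniform_average')
    assert y_type == 'continuous'
    assert y_true.shape == (4, 1) and y_pred.shape == (4, 1)
    return y_type, multioutput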
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
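# --- Hedged illustration (not part of scikit-learn) -------------------------
# `sample_weight` simply re-weights the per-sample absolute errors inside
# np.average. With all the weight on the perfectly predicted samples the
# loss drops to zero; assumed toy values and a hypothetical helper name.
def _example_mae_sample_weight():
    y_true = [1.0, 2.0, 3.0]
    y_pred = [1.0, 2.0, 5.0]
    assert mean_absolute_error(y_true, y_pred) == 2.0 / 3.0
    assert mean_absolute_error(y_true, y_pred, sample_weight=[1, 1, 0]) == 0.0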
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
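# --- Hedged illustration (not part of scikit-learn) -------------------------
# Unlike the mean-based losses above, the median absolute error ignores a
# single large outlier entirely. Assumed toy values, hypothetical helper name.
def _example_median_vs_mean_absolute_error():
    y_true = [1.0, 2.0, 3.0, 4.0, 100.0]
    y_pred = [1.0, 2.0, 3.0, 4.0, 5.0]
    assert median_absolute_error(y_true, y_pred) == 0.0
    assert mean_absolute_error(y_true, y_pred) == 19.0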
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get an R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default value corresponds to 'variance_weighted', but
will be changed to 'uniform_average' in a future version.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
# arbitrarily set to zero to avoid -inf scores; a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
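# --- Hedged illustration (not part of scikit-learn) -------------------------
# explained_variance_score only looks at the variance of the residuals, so a
# constant bias in the predictions goes unpunished, while r2_score penalises
# it. A minimal sketch with assumed toy values and a hypothetical helper name.
def _example_bias_explained_variance_vs_r2():
    y_true = [1.0, 2.0, 3.0, 4.0]
    y_pred = [2.0, 3.0, 4.0, 5.0]  # perfect shape, constant +1 offset
    assert explained_variance_score(y_true, y_pred) == 1.0
    assert r2_score(y_true, y_pred, multioutput='uniform_average') < 1.0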
|
bsd-3-clause
|
marcocaccin/LearningMetaDynamics
|
MD_unconstrained/md_db0_build.py
|
1
|
7472
|
from __future__ import division, print_function
import scipy as sp
import scipy.linalg as LA
import os
from ase import units
from ase import Atoms
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
import pickle as pkl
dihedral_atoms_phi = [4,6,8,14] # C(O)-N-C(a)-C(O)
dihedral_atoms_psi = [6,8,14,16] # N-C(a)-C(O)-N
fun_group_phi = range(6) + [7]
fun_group_psi = range(15,22)
#############################################################
##### Utility functions to be added to ase.Atoms Class #####
def phi_(self, dihedral_list=dihedral_atoms_phi):
return self.get_dihedral(dihedral_list)
def psi_(self, dihedral_list=dihedral_atoms_psi):
return self.get_dihedral(dihedral_list)
def colvars_(self):
s = sp.atleast_2d(sp.array([self.phi(), self.psi()]))
return s
def grid(x, y, z, resX=100, resY=100):
"Convert 3 column data to matplotlib grid"
xi = sp.linspace(min(x), max(x), resX)
yi = sp.linspace(min(y), max(y), resY)
Z = griddata(x, y, z, xi, yi, interp='linear')
X, Y = sp.meshgrid(xi, yi)
return X, Y, Z
def round_vector(vec, precision = 0.1):
return ((vec + 0.5 * precision)/ precision).astype('int') * precision
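# Hedged illustration (not part of the original script): round_vector bins a
# vector of collective variables onto a grid with the given spacing, e.g. 0.1.
# The sample values and the helper name are assumptions for clarity only.
def round_vector_example():
    binned = round_vector(sp.array([0.26, 1.44, 3.14]), precision=0.1)
    assert sp.allclose(binned, [0.3, 1.4, 3.1])
    return binned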
### CODE STARTS HERE ###
run_from_scratch = False
T = 300
if run_from_scratch:
setattr(Atoms, 'phi', phi_)
setattr(Atoms, 'psi', psi_)
setattr(Atoms, 'colvars', colvars_)
os.system('lmp_mpi < input_md')
# load trajectory and get atomic positions into adata
print("Reading positions from trajectory file...")
data = []
with open('lmp_md.xyz', 'r') as file:
for i, line in enumerate(file.readlines()):
if i % 31 > 8:
data.append(line.split()[2:5])
n_atoms = 22
print("Converting data...")
data = sp.asarray(data).astype('float')
data = data.reshape((len(data) // n_atoms, n_atoms, 3))  # integer division: plain '/' is true division here because of the __future__ import
# write potential energies to file
print("Reading potential energies...")
os.system('grep PotEng log.lammps | awk {\'print $3\'} > PotEng.md')
energies = sp.loadtxt('PotEng.md')
energies *= units.kcal / units.mol
# write kinetic energies to file
os.system('grep KinEng log.lammps | awk {\'print $6\'} > KinEng.md')
kinengs = sp.loadtxt('KinEng.md')
kinengs *= units.kcal / units.mol
# now extract CVs from positions
colvars = []
print("Converting positions into collective variables...")
for positions in data:
atoms = Atoms(['H']*n_atoms, positions)
colvars.append(atoms.colvars().flatten())
colvars = sp.asarray(colvars)
phipsi_pot = sp.hstack((colvars,energies[:,None], kinengs[:,None]))
print("Saving data...")
sp.savetxt('phi_psi_pot_md300.csv', phipsi_pot)
else:
data = sp.loadtxt('phi_psi_pot_md300.csv')
colvars = data[:,:2]
energies = data[:,2]
kinengs = data[:,3]
colvars_r = round_vector(colvars)
energies_r = {}
kinengs_r = {}
for i, s in enumerate(colvars_r):
if ('%f-%f' % (s[0], s[1])) in energies_r.keys():
energies_r['%f-%f' % (s[0], s[1])].append(energies[i])
kinengs_r['%f-%f' % (s[0], s[1])].append(kinengs[i])
else:
energies_r['%f-%f' % (s[0], s[1])] = [energies[i]]
kinengs_r['%f-%f' % (s[0], s[1])] = [kinengs[i]]
colvars_2 = []
energies_mean = []
energies_min = []
n_confs = []
free_energies = []
ftilde = []
ftilde2 = []
meankin = 1.5 * 22 * units.kB * T
meane_plus_kin = []
beta = 1 / (units.kB * T)
for s, energy in energies_r.iteritems():
energy = sp.array(energy)
kin = sp.array(kinengs_r[s])
colvars_2.append(sp.array(s.split('-')).astype('float'))
energies_mean.append(energy.mean())
energies_min.append(energy.min())
n_confs.append(len(energy))
free_energies.append(- units.kB * T * sp.log(sp.exp(-energy / (units.kB * T)).sum()))
ftilde.append(units.kB * T * (- sp.log(n_confs[-1]) + sp.log(sp.mean(sp.exp(beta * (energy - energies_min[-1]))))))
ftilde2.append(units.kB * T * (- sp.log(n_confs[-1]) + sp.log(sp.mean(sp.exp(beta * energy)))))
meane_plus_kin.append(energies_mean[-1] + kin.mean() - meankin)
colvars_2 = sp.array(colvars_2)
n_confs = sp.array(n_confs)
energies_min = sp.array(energies_min)
energies_mean = sp.array(energies_mean)
free_energies = sp.array(free_energies)
ftilde = sp.array(ftilde)
ftilde2 = sp.array(ftilde2)
meane_plus_kin = sp.array(meane_plus_kin)
phi, psi = colvars_2[:,0], colvars_2[:,1]
phimin, phimax = phi.min(), phi.max()
psimin, psimax = psi.min(), psi.max()
phirange = phimax - phimin
psirange = psimax - psimin
aspect_ratio = psirange/phirange
print("Plotting trajectory...")
fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
sc = ax.scatter(phi, psi, c=energies_mean, marker = 's', s = 290,
cmap = 'RdBu', alpha = .8, edgecolors='none')
ax.set_xlim(phimin, phimax)
ax.set_ylim(psimin, psimax)
plt.colorbar(sc, format='%.3e')
fig.savefig('energy_mean.png')
ax.clear()
fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
sc = ax.scatter(phi, psi, c=energies_min, marker = 's', s = 290,
cmap = 'RdBu', alpha = .8, edgecolors='none')
ax.set_xlim(phimin, phimax)
ax.set_ylim(psimin, psimax)
plt.colorbar(sc, format='%.3e')
fig.savefig('energy_min.png')
ax.clear()
fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
sc = ax.scatter(phi, psi, c=free_energies, marker = 's', s = 290,
cmap = 'RdBu', alpha = .8, edgecolors='none')
ax.set_xlim(phimin, phimax)
ax.set_ylim(psimin, psimax)
plt.colorbar(sc, format='%.3e')
fig.savefig('free_energy.png')
ax.clear()
fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
sc = ax.scatter(phi, psi, c=(energies_min - free_energies), marker = 's', s = 290,
cmap = 'RdBu', alpha = .8, edgecolors='none')
ax.set_xlim(phimin, phimax)
ax.set_ylim(psimin, psimax)
plt.colorbar(sc, format='%.3e')
fig.savefig('TS_min.png')
ax.clear()
fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
sc = ax.scatter(phi, psi, c=(energies_min + ftilde), marker = 's', s = 290,
cmap = 'RdBu', alpha = .8, edgecolors='none')
ax.set_xlim(phimin, phimax)
ax.set_ylim(psimin, psimax)
plt.colorbar(sc, format='%.3e')
fig.savefig('Emin_plus_Ftilde.png')
ax.clear()
fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
sc = ax.scatter(phi, psi, c=ftilde, marker = 's', s = 290,
cmap = 'RdBu', alpha = .8, edgecolors='none')
ax.set_xlim(phimin, phimax)
ax.set_ylim(psimin, psimax)
plt.colorbar(sc, format='%.3e')
fig.savefig('Ftilde_only.png')
ax.clear()
fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
sc = ax.scatter(phi, psi, c=ftilde2, marker = 's', s = 290,
cmap = 'RdBu', alpha = .8, edgecolors='none')
ax.set_xlim(phimin, phimax)
ax.set_ylim(psimin, psimax)
plt.colorbar(sc, format='%.3e')
fig.savefig('Ftilde2_only.png')
ax.clear()
fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
sc = ax.scatter(phi, psi, c=-units.kB * T * sp.log(n_confs), marker = 's', s = 290,
cmap = 'RdBu', alpha = .8, edgecolors='none')
ax.set_xlim(phimin, phimax)
ax.set_ylim(psimin, psimax)
plt.colorbar(sc, format='%.3e')
fig.savefig('F_from_N.png')
ax.clear()
fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
sc = ax.scatter(phi, psi, c=meane_plus_kin, marker = 's', s = 290,
cmap = 'RdBu', alpha = .8, edgecolors='none')
ax.set_xlim(phimin, phimax)
ax.set_ylim(psimin, psimax)
plt.colorbar(sc, format='%.3e')
fig.savefig('meane_plus_kin.png')
ax.clear()
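# Hedged illustration (not part of the original script): the same
# Boltzmann-weighted estimate used in the loop above,
#   F(s) = -kB*T * log( sum_i exp(-E_i / (kB*T)) ),
# which is dominated by the lowest-energy configurations in each bin.
# The toy arguments and the helper name are assumptions for clarity only.
def free_energy_sketch(energies_in_eV, temperature=300.0):
    energies_in_eV = sp.asarray(energies_in_eV)
    return -units.kB * temperature * sp.log(
        sp.exp(-energies_in_eV / (units.kB * temperature)).sum())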
|
gpl-2.0
|
teonlamont/mne-python
|
mne/viz/tests/test_evoked.py
|
2
|
14543
|
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
# Jona Sassenhagen <[email protected]>
#
# License: Simplified BSD
import os.path as op
import numpy as np
from numpy.testing import assert_allclose
import pytest
import mne
from mne import (read_events, Epochs, pick_types, read_cov, compute_covariance,
make_fixed_length_events)
from mne.channels import read_layout
from mne.io import read_raw_fif
from mne.utils import run_tests_if_main, catch_logging
from mne.viz.evoked import _line_plot_onselect, plot_compare_evokeds
from mne.viz.utils import _fake_click
from mne.stats import _parametric_ci
from mne.datasets import testing
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
raw_sss_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.1
n_chan = 6
layout = read_layout('Vectorview-all')
def _get_picks(raw):
"""Get picks."""
return pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
def _get_epochs():
"""Get epochs."""
raw = read_raw_fif(raw_fname)
raw.add_proj([], remove_existing=True)
events = read_events(event_name)
picks = _get_picks(raw)
# Use a subset of channels for plotting speed
picks = picks[np.round(np.linspace(0, len(picks) - 1, n_chan)).astype(int)]
# make sure we have a magnetometer and a pair of grad pairs for topomap.
picks = np.concatenate([[2, 3, 4, 6, 7], picks])
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks)
epochs.info['bads'] = [epochs.ch_names[-1]]
return epochs
def _get_epochs_delayed_ssp():
"""Get epochs with delayed SSP."""
raw = read_raw_fif(raw_fname)
events = read_events(event_name)
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, proj='delayed', reject=reject)
return epochs_delayed_ssp
def test_plot_evoked_cov():
"""Test plot_evoked with noise_cov."""
import matplotlib.pyplot as plt
evoked = _get_epochs().average()
cov = read_cov(cov_fname)
cov['projs'] = [] # avoid warnings
evoked.plot(noise_cov=cov, time_unit='s')
with pytest.raises(TypeError, match='Covariance'):
evoked.plot(noise_cov=1., time_unit='s')
with pytest.raises(IOError, match='No such file'):
evoked.plot(noise_cov='nonexistent-cov.fif', time_unit='s')
raw = read_raw_fif(raw_sss_fname)
events = make_fixed_length_events(raw)
epochs = Epochs(raw, events)
cov = compute_covariance(epochs)
evoked_sss = epochs.average()
with pytest.warns(RuntimeWarning, match='relative scaling'):
evoked_sss.plot(noise_cov=cov, time_unit='s')
plt.close('all')
@pytest.mark.slowtest
def test_plot_evoked():
"""Test plotting of evoked."""
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
evoked = _get_epochs().average()
fig = evoked.plot(proj=True, hline=[1], exclude=[], window_title='foo',
time_unit='s')
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax,
[line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax,
[ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
# plot with bad channels excluded & spatial_colors & zorder
evoked.plot(exclude='bads', time_unit='s')
# test selective updating of dict keys is working.
evoked.plot(hline=[1], units=dict(mag='femto foo'), time_unit='s')
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
evoked_delayed_ssp.plot(proj='interactive', time_unit='s')
evoked_delayed_ssp.apply_proj()
pytest.raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive', time_unit='s')
evoked_delayed_ssp.info['projs'] = []
pytest.raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive', time_unit='s')
pytest.raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive', axes='foo', time_unit='s')
plt.close('all')
# test GFP only
evoked.plot(gfp='only', time_unit='s')
pytest.raises(ValueError, evoked.plot, gfp='foo', time_unit='s')
evoked.plot_image(proj=True, time_unit='ms')
# fail nicely on NaN
evoked_nan = evoked.copy()
evoked_nan.data[:, 0] = np.nan
pytest.raises(ValueError, evoked_nan.plot)
with np.errstate(invalid='ignore'):
pytest.raises(ValueError, evoked_nan.plot_image)
pytest.raises(ValueError, evoked_nan.plot_joint)
# test mask
evoked.plot_image(picks=[1, 2], mask=evoked.data > 0, time_unit='s')
evoked.plot_image(picks=[1, 2], mask_cmap=None, colorbar=False,
mask=np.ones(evoked.data.shape).astype(bool),
time_unit='s')
with pytest.warns(RuntimeWarning, match='not adding contour'):
evoked.plot_image(picks=[1, 2], mask=None, mask_style="both",
time_unit='s')
pytest.raises(ValueError, evoked.plot_image, mask=evoked.data[1:, 1:] > 0,
time_unit='s')
# plot with bad channels excluded
evoked.plot_image(exclude='bads', cmap='interactive', time_unit='s')
evoked.plot_image(exclude=evoked.info['bads'], time_unit='s') # same thing
plt.close('all')
pytest.raises(ValueError, evoked.plot_image, picks=[0, 0],
time_unit='s') # duplicates
ch_names = ["MEG 1131", "MEG 0111"]
picks = [evoked.ch_names.index(ch) for ch in ch_names]
evoked.plot_image(show_names="all", time_unit='s', picks=picks)
yticklabels = plt.gca().get_yticklabels()
for tick_target, tick_observed in zip(ch_names, yticklabels):
assert tick_target in str(tick_observed)
evoked.plot_image(show_names=True, time_unit='s')
# test groupby
evoked.plot_image(group_by=dict(sel=[0, 7]), axes=dict(sel=plt.axes()))
plt.close('all')
for group_by, axes in (("something", dict()), (dict(), "something")):
pytest.raises(ValueError, evoked.plot_image, group_by=group_by,
axes=axes)
# test plot_topo
evoked.plot_topo() # should auto-find layout
_line_plot_onselect(0, 200, ['mag', 'grad'], evoked.info, evoked.data,
evoked.times)
plt.close('all')
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
cov['projs'] = [] # avoid warnings
# test rank param.
evoked.plot_white(cov, rank={'mag': 101, 'grad': 201}, time_unit='s')
evoked.plot_white(cov, rank={'mag': 101}, time_unit='s') # test rank param
evoked.plot_white(cov, rank={'grad': 201}, time_unit='s')
pytest.raises(
ValueError, evoked.plot_white, cov,
rank={'mag': 101, 'grad': 201, 'meg': 306}, time_unit='s')
pytest.raises(
ValueError, evoked.plot_white, cov, rank={'meg': 306}, time_unit='s')
evoked.plot_white([cov, cov], time_unit='s')
# plot_compare_evokeds: test condition contrast, CI, color assignment
plot_compare_evokeds(evoked.copy().pick_types(meg='mag'))
plot_compare_evokeds(
evoked.copy().pick_types(meg='grad'), picks=[1, 2],
show_sensors="upper right", show_legend="upper left")
evokeds = [evoked.copy() for _ in range(10)]
for evoked in evokeds:
evoked.data += (rng.randn(*evoked.data.shape) *
np.std(evoked.data, axis=-1, keepdims=True))
for picks in ([0], [1], [2], [0, 2], [1, 2], [0, 1, 2],):
figs = plot_compare_evokeds([evokeds], picks=picks, ci=0.95)
if not isinstance(figs, list):
figs = [figs]
for fig in figs:
ext = fig.axes[0].collections[0].get_paths()[0].get_extents()
xs, ylim = ext.get_points().T
assert_allclose(xs, evoked.times[[0, -1]])
line = fig.axes[0].lines[0]
xs = line.get_xdata()
assert_allclose(xs, evoked.times)
ys = line.get_ydata()
assert (ys < ylim[1]).all()
assert (ys > ylim[0]).all()
plt.close('all')
evoked.rename_channels({'MEG 2142': "MEG 1642"})
assert len(plot_compare_evokeds(evoked)) == 2
colors = dict(red='r', blue='b')
linestyles = dict(red='--', blue='-')
red, blue = evoked.copy(), evoked.copy()
red.data *= 1.1
blue.data *= 0.9
plot_compare_evokeds([red, blue], picks=3) # list of evokeds
plot_compare_evokeds([red, blue], picks=3, truncate_yaxis=True,
vlines=[]) # also testing empty vlines here
plot_compare_evokeds([[red, evoked], [blue, evoked]],
picks=3) # list of lists
# test picking & plotting grads
contrast = dict()
contrast["red/stim"] = list((evoked.copy(), red))
contrast["blue/stim"] = list((evoked.copy(), blue))
# test a bunch of params at once
for evokeds_ in (evoked.copy().pick_types(meg='mag'), contrast,
[red, blue], [[red, evoked], [blue, evoked]]):
plot_compare_evokeds(evokeds_, picks=0, ci=True) # also tests CI
plt.close('all')
# test styling + a bunch of other params at once
colors, linestyles = dict(red='r', blue='b'), dict(red='--', blue='-')
plot_compare_evokeds(contrast, colors=colors, linestyles=linestyles,
picks=[0, 2], vlines=[.01, -.04], invert_y=True,
truncate_yaxis=False, ylim=dict(mag=(-10, 10)),
styles={"red/stim": {"linewidth": 1}},
show_sensors=True)
# various bad styles
params = [dict(picks=3, colors=dict(fake=1)),
dict(picks=3, styles=dict(fake=1)), dict(picks=3, gfp=True),
dict(picks=3, show_sensors="a"),
dict(colors=dict(red=10., blue=-2))]
for param in params:
pytest.raises(ValueError, plot_compare_evokeds, evoked, **param)
pytest.raises(TypeError, plot_compare_evokeds, evoked, picks='str')
pytest.raises(TypeError, plot_compare_evokeds, evoked, vlines='x')
plt.close('all')
# `evoked` must contain Evokeds
pytest.raises(TypeError, plot_compare_evokeds, [[1, 2], [3, 4]])
# `ci` must be float or None
pytest.raises(TypeError, plot_compare_evokeds, contrast, ci='err')
# test all-positive ylim
contrast["red/stim"], contrast["blue/stim"] = red, blue
plot_compare_evokeds(contrast, picks=[0], colors=['r', 'b'],
ylim=dict(mag=(1, 10)), ci=_parametric_ci,
truncate_yaxis='max_ticks', show_sensors=False,
show_legend=False)
# sequential colors
evokeds = (evoked, blue, red)
contrasts = {"a{}/b".format(ii): ev for ii, ev in
enumerate(evokeds)}
colors = {"a" + str(ii): ii for ii, _ in enumerate(evokeds)}
contrasts["a1/c"] = evoked.copy()
for split in (True, False):
for linestyles in (["-"], {"b": "-", "c": ":"}):
plot_compare_evokeds(
contrasts, colors=colors, picks=[0], cmap='Reds',
split_legend=split, linestyles=linestyles,
ci=False, show_sensors=False)
colors = {"a" + str(ii): ii / len(evokeds)
for ii, _ in enumerate(evokeds)}
plot_compare_evokeds(
contrasts, colors=colors, picks=[0], cmap='Reds',
split_legend=split, linestyles=linestyles, ci=False,
show_sensors=False)
red.info["chs"][0]["loc"][:2] = 0 # test plotting channel at zero
plot_compare_evokeds(red, picks=[0],
ci=lambda x: [x.std(axis=0), -x.std(axis=0)])
plot_compare_evokeds([red, blue], picks=[0], cmap="summer", ci=None,
split_legend=None)
plot_compare_evokeds([red, blue], cmap=None, split_legend=True)
pytest.raises(ValueError, plot_compare_evokeds, [red] * 20)
pytest.raises(ValueError, plot_compare_evokeds, contrasts,
cmap='summer')
plt.close('all')
# Hack to test plotting of maxfiltered data
evoked_sss = evoked.copy()
sss = dict(sss_info=dict(in_order=80, components=np.arange(80)))
evoked_sss.info['proc_history'] = [dict(max_info=sss)]
evoked_sss.plot_white(cov, rank={'meg': 64}, time_unit='s')
pytest.raises(
ValueError, evoked_sss.plot_white, cov, rank={'grad': 201},
time_unit='s')
evoked_sss.plot_white(cov, time_unit='s')
# plot with bad channels excluded, spatial_colors, zorder & pos. layout
evoked.rename_channels({'MEG 0133': 'MEG 0000'})
evoked.plot(exclude=evoked.info['bads'], spatial_colors=True, gfp=True,
zorder='std', time_unit='s')
evoked.plot(exclude=[], spatial_colors=True, zorder='unsorted',
time_unit='s')
pytest.raises(TypeError, evoked.plot, zorder='asdf', time_unit='s')
plt.close('all')
evoked.plot_sensors() # Test plot_sensors
plt.close('all')
evoked.pick_channels(evoked.ch_names[:4])
with catch_logging() as log_file:
evoked.plot(verbose=True, time_unit='s')
assert 'Need more than one' in log_file.getvalue()
@testing.requires_testing_data
def test_plot_ctf():
"""Test plotting of CTF evoked."""
ctf_dir = op.join(testing.data_path(download=False), 'CTF')
raw_fname = op.join(ctf_dir, 'testdata_ctf.ds')
raw = mne.io.read_raw_ctf(raw_fname, preload=True)
events = np.array([[200, 0, 1]])
event_id = 1
tmin, tmax = -0.1, 0.5 # start and end of an epoch in sec.
picks = mne.pick_types(raw.info, meg=True, stim=True, eog=True,
ref_meg=True, exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, preload=True)
evoked = epochs.average()
evoked.plot_joint(times=[0.1])
mne.viz.plot_compare_evokeds([evoked, evoked])
run_tests_if_main()
|
bsd-3-clause
|
benofben/interactive-brokers-api
|
Martingale Simulation/RandomWalkPrice.py
|
1
|
1126
|
import random
def run():
results=[]
for unused_i in range(0,100000):
result=simulateOneMartingale()
results.append(result)
plot(results)
def simulateOneMartingale():
initialMargin=5060
maintenanceMargin=4600
transactionCost=2
capital=20000
numberOfOpenContracts=1
while numberOfOpenContracts!=0:
if random.random()<0.5:
# price went up a tick
capital+=numberOfOpenContracts*25
# We're long and we sell
numberOfOpenContracts-=1
capital-=transactionCost
else:
#price went down a tick
capital-=numberOfOpenContracts*25
if capital/numberOfOpenContracts<maintenanceMargin:
# We're long and forced to sell due to margin
numberOfOpenContracts-=1
capital-=transactionCost
elif initialMargin<capital-numberOfOpenContracts*maintenanceMargin:
# We're long and we buy
numberOfOpenContracts+=1
capital-=transactionCost
#print(str(numberOfOpenContracts) + ' ' + str(capital))
result=capital-20000
return result
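# Hedged worked example (not part of the original script): the two margin
# checks above with the starting numbers. With 20000 of capital and one open
# long contract there is no margin call (20000/1 >= 4600), and another
# contract is bought because 5060 < 20000 - 1*4600 = 15400. The helper name
# is hypothetical.
def marginCheckExample():
    initialMargin, maintenanceMargin = 5060, 4600
    capital, numberOfOpenContracts = 20000, 1
    assert capital / numberOfOpenContracts >= maintenanceMargin
    assert initialMargin < capital - numberOfOpenContracts * maintenanceMargin
    return capital - numberOfOpenContracts * maintenanceMargin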
def plot(results):
import matplotlib.pyplot as plt
plt.hist(results)
plt.show()
run()
|
mit
|
spallavolu/scikit-learn
|
sklearn/metrics/tests/test_pairwise.py
|
71
|
25104
|
import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a ValueError is raised when the lengths of X and Y
# differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
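# --- Hedged illustration (not a scikit-learn test) --------------------------
# The {X,Y}_norm_squared arguments exercised above work because of the
# identity ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2, so the squared norms can
# be precomputed once and reused. A minimal numpy-only sketch; the helper
# name is hypothetical.
def _euclidean_identity_sketch():
    rng = np.random.RandomState(0)
    x, y = rng.random_sample(4), rng.random_sample(4)
    lhs = np.sum((x - y) ** 2)
    rhs = np.dot(x, x) - 2 * np.dot(x, y) + np.dot(y, y)
    assert np.allclose(lhs, rhs)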
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of an RBF kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine kernel is equal to a linear kernel when the data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyway.
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
|
bsd-3-clause
|
akrherz/iem
|
htdocs/plotting/auto/scripts/p25.py
|
1
|
3705
|
"""Distrubution of daily high and low temperatures"""
import datetime
import psycopg2.extras
import numpy as np
from scipy.stats import norm
import pandas as pd
from pyiem import reference
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc[
"description"
] = """This plot displays the distribution of observed
daily high and low temperatures for a given day and given state. The
dataset is fit with a normal distribution based on simple population
statistics.
"""
desc["arguments"] = [
dict(type="state", name="state", default="IA", label="Which state?"),
dict(type="month", name="month", default="10", label="Select Month:"),
dict(type="day", name="day", default="7", label="Select Day:"),
]
return desc
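# --- Hedged illustration (not part of the original IEM script) ---------------
# A minimal sketch of the normal fit described above: scipy.stats.norm.fit
# returns the sample mean and (MLE) standard deviation, which plotter() below
# overlays on the high/low temperature histograms. The helper name and inputs
# are illustrative only.
def _normal_fit_sketch(values):
    """Fit a normal distribution and return (mu, sigma, pdf on a grid)."""
    values = np.asarray(values, dtype=float)
    mu, std = norm.fit(values)
    grid = np.linspace(values.min(), values.max(), 100)
    return mu, std, norm.pdf(grid, mu, std)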
def plotter(fdict):
""" Go """
pgconn = get_dbconn("coop", user="nobody")
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
ctx = get_autoplot_context(fdict, get_description())
month = ctx["month"]
day = ctx["day"]
state = ctx["state"][:2]
table = "alldata_%s" % (state,)
cursor.execute(
f"SELECT high, low from {table} where sday = %s and high is not null "
"and low is not null",
("%02i%02i" % (month, day),),
)
if cursor.rowcount == 0:
raise NoDataFound("No Data Found.")
highs = []
lows = []
for row in cursor:
highs.append(row[0])
lows.append(row[1])
highs = np.array(highs)
lows = np.array(lows)
(fig, ax) = plt.subplots(1, 1)
n, bins, _ = ax.hist(
highs,
bins=(np.max(highs) - np.min(highs)),
histtype="step",
density=True,
color="r",
zorder=1,
)
high_freq = pd.Series(n, index=bins[:-1])
mu, std = norm.fit(highs)
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
ax.plot(x, p, "r--", linewidth=2)
ax.text(
0.8,
0.98,
"\n".join(
[
rf"High Temp\n$\mu$ = {mu:.1f}$^\circ$F",
rf"$\sigma$ = {std:.2f}",
rf"$n$ = {len(highs)}",
]
),
va="top",
ha="left",
color="r",
transform=ax.transAxes,
bbox=dict(color="white"),
)
n, bins, _ = ax.hist(
lows,
bins=(np.max(lows) - np.min(lows)),
histtype="step",
density=True,
color="b",
zorder=1,
)
low_freq = pd.Series(n, index=bins[:-1])
df = pd.DataFrame(dict(low_freq=low_freq, high_freq=high_freq))
df.index.name = "tmpf"
mu, std = norm.fit(lows)
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
ax.plot(x, p, "b--", linewidth=2)
ts = datetime.datetime(2000, month, day)
ax.set_title(
("%s %s Temperature Distribution")
% (reference.state_names[state], ts.strftime("%d %B"))
)
ax.text(
0.02,
0.98,
"\n".join(
[
rf"Low Temp\n$\mu$ = {mu:.1f}$^\circ$F",
rf"$\sigma$ = {std:.2f}",
rf"$n$ = {len(lows)}",
]
),
va="top",
ha="left",
color="b",
transform=ax.transAxes,
bbox=dict(color="white"),
)
ax.grid(True)
ax.set_xlabel(r"Temperature $^\circ$F")
ax.set_ylabel("Probability")
return fig, df
if __name__ == "__main__":
plotter(dict())
|
mit
|
nelson-liu/scikit-learn
|
sklearn/linear_model/sag.py
|
18
|
11273
|
"""Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from .base import make_dataset
from .sag_fast import sag
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
For squared loss, the step size is set to 1 / (L + fit_intercept +
alpha_scaled); for log loss it is 4 / (L + fit_intercept + 4 * alpha_scaled),
where L is the maximum squared sum of X over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared", "multinomial"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
"""
if loss in ('log', 'multinomial'):
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter : int, optional
The max number of passes over the training data if the stopping
criteria is not reached. Defaults to 1000.
tol : double, optional
The stopping criteria for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose : integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
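# --- Hedged illustration (not part of scikit-learn) ---------------------------
# A minimal sketch of the warm-start protocol documented above: the
# warm_start_mem dict returned by one call can be passed back to continue the
# optimisation instead of restarting from zero coefficients. The toy data and
# helper name are illustrative only.
def _warm_start_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = rng.randn(20)
    coef, n_iter, mem = sag_solver(X, y, loss='squared', alpha=1., max_iter=5)
    # Continue from the previous state instead of starting from scratch.
    coef, n_iter, mem = sag_solver(X, y, loss='squared', alpha=1., max_iter=5,
                                   warm_start_mem=mem)
    return coef, n_iter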
|
bsd-3-clause
|
Microsoft/hummingbird
|
tests/test_sklearn_nb_converter.py
|
1
|
5440
|
"""
Tests sklearn Naive Bayes model (BernoulliNB, GaussianNB, MultinomialNB) converters.
"""
import unittest
import warnings
import numpy as np
import torch
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
import hummingbird.ml
class TestSklearnNBClassifier(unittest.TestCase):
# BernoulliNB test function to be parameterized
def _test_bernoulinb_classifer(
self, num_classes, alpha=1.0, binarize=None, fit_prior=False, class_prior=None, labels_shift=0
):
model = BernoulliNB(alpha=alpha, binarize=binarize, fit_prior=fit_prior, class_prior=class_prior)
np.random.seed(0)
if binarize is None:
X = np.random.randint(2, size=(100, 200))
else:
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100) + labels_shift
model.fit(X, y)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(model.predict_proba(X), torch_model.predict_proba(X), rtol=1e-6, atol=1e-5)
# BernoulliNB binary
def test_bernoulinb_classifer_bi(self):
self._test_bernoulinb_classifer(2)
# BernoulliNB multi-class
def test_bernoulinb_classifer_multi(self):
self._test_bernoulinb_classifer(3)
# BernoulliNB multi-class w/ modified alpha
def test_bernoulinb_classifer_multi_alpha(self):
self._test_bernoulinb_classifer(3, alpha=0.5)
# BernoulliNB multi-class w/ binarize
def test_bernoulinb_classifer_multi_binarize(self):
self._test_bernoulinb_classifer(3, binarize=0.5)
# BernoulliNB multi-class w/ fit prior
def test_bernoulinb_classifer_multi_fit_prior(self):
self._test_bernoulinb_classifer(3, fit_prior=True)
# BernoulliNB multi-class w/ class prior
def test_bernoulinb_classifer_multi_class_prior(self):
np.random.seed(0)
class_prior = np.random.rand(3)
self._test_bernoulinb_classifer(3, class_prior=class_prior)
# BernoulliNB multi-class w/ labels shift
def test_bernoulinb_classifer_multi_labels_shift(self):
self._test_bernoulinb_classifer(3, labels_shift=3)
# MultinomialNB test function to be parameterized
def _test_multinomialnb_classifer(self, num_classes, alpha=1.0, fit_prior=False, class_prior=None, labels_shift=0):
model = MultinomialNB(alpha=alpha, fit_prior=fit_prior, class_prior=class_prior)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100) + labels_shift
model.fit(X, y)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(model.predict_proba(X), torch_model.predict_proba(X), rtol=1e-6, atol=1e-5)
# MultinomialNB binary
def test_multinomialnb_classifer_bi(self):
self._test_multinomialnb_classifer(2)
# MultinomialNB multi-class
def test_multinomialnb_classifer_multi(self):
self._test_multinomialnb_classifer(3)
# MultinomialNB multi-class w/ modified alpha
def test_multinomialnb_classifer_multi_alpha(self):
self._test_multinomialnb_classifer(3, alpha=0.5)
# MultinomialNB multi-class w/ fit prior
def test_multinomialnb_classifer_multi_fit_prior(self):
self._test_multinomialnb_classifer(3, fit_prior=True)
# MultinomialNB multi-class w/ class prior
def test_multinomialnb_classifer_multi_class_prior(self):
np.random.seed(0)
class_prior = np.random.rand(3)
self._test_multinomialnb_classifer(3, class_prior=class_prior)
# MultinomialNB multi-class w/ labels shift
def test_multinomialnb_classifer_multi_labels_shift(self):
self._test_multinomialnb_classifer(3, labels_shift=3)
# GaussianNB test function to be parameterized
def _test_gaussiannb_classifer(self, num_classes, priors=None, var_smoothing=1e-9, labels_shift=0):
model = GaussianNB(priors=priors, var_smoothing=var_smoothing)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100) + labels_shift
model.fit(X, y)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(model.predict_proba(X), torch_model.predict_proba(X), rtol=1e-6, atol=1e-5)
# GaussianNB binary
def test_gaussiannb_classifer_bi(self):
self._test_gaussiannb_classifer(2)
# GaussianNB multi-class
def test_gaussiannb_classifer_multi(self):
self._test_gaussiannb_classifer(3)
# GaussianNB multi-class w/ class prior
def test_gaussiannb_classifer_multi_class_prior(self):
np.random.seed(0)
priors = np.random.rand(3)
priors = priors / np.sum(priors)
self._test_gaussiannb_classifer(3, priors=priors)
# GaussianNB multi-class w/ modified var_smoothing
def test_gaussiannb_classifer_multi_alpha(self):
self._test_gaussiannb_classifer(3, var_smoothing=1e-2)
# GaussianNB multi-class w/ labels shift
def test_gaussiannb_classifer_multi_labels_shift(self):
self._test_gaussiannb_classifer(3, labels_shift=3)
if __name__ == "__main__":
unittest.main()
|
mit
|
markneville/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_mixed.py
|
70
|
3776
|
from matplotlib._image import frombuffer
from matplotlib.backends.backend_agg import RendererAgg
class MixedModeRenderer(object):
"""
A helper class to implement a renderer that switches between
vector and raster drawing. An example may be a PDF writer, where
most things are drawn with PDF vector commands, but some very
complex objects, such as quad meshes, are rasterised and then
output as images.
"""
def __init__(self, width, height, dpi, vector_renderer, raster_renderer_class=None):
"""
width: The width of the canvas in logical units
height: The height of the canvas in logical units
dpi: The dpi of the canvas
vector_renderer: An instance of a subclass of RendererBase
that will be used for the vector drawing.
raster_renderer_class: The renderer class to use for the
raster drawing. If not provided, this will use the Agg
backend (which is currently the only viable option anyway.)
"""
if raster_renderer_class is None:
raster_renderer_class = RendererAgg
self._raster_renderer_class = raster_renderer_class
self._width = width
self._height = height
self.dpi = dpi
assert not vector_renderer.option_image_nocomposite()
self._vector_renderer = vector_renderer
self._raster_renderer = None
self._rasterizing = 0
self._set_current_renderer(vector_renderer)
_methods = """
close_group draw_image draw_markers draw_path
draw_path_collection draw_quad_mesh draw_tex draw_text
finalize flipy get_canvas_width_height get_image_magnification
get_texmanager get_text_width_height_descent new_gc open_group
option_image_nocomposite points_to_pixels strip_math
""".split()
def _set_current_renderer(self, renderer):
self._renderer = renderer
for method in self._methods:
if hasattr(renderer, method):
setattr(self, method, getattr(renderer, method))
renderer.start_rasterizing = self.start_rasterizing
renderer.stop_rasterizing = self.stop_rasterizing
def start_rasterizing(self):
"""
Enter "raster" mode. All subsequent drawing commands (until
stop_rasterizing is called) will be drawn with the raster
backend.
If start_rasterizing is called multiple times before
stop_rasterizing is called, this method has no effect.
"""
if self._rasterizing == 0:
self._raster_renderer = self._raster_renderer_class(
self._width*self.dpi, self._height*self.dpi, self.dpi)
self._set_current_renderer(self._raster_renderer)
self._rasterizing += 1
def stop_rasterizing(self):
"""
Exit "raster" mode. All of the drawing that was done since
the last start_rasterizing command will be copied to the
vector backend by calling draw_image.
If stop_rasterizing is called multiple times before
start_rasterizing is called, this method has no effect.
"""
self._rasterizing -= 1
if self._rasterizing == 0:
self._set_current_renderer(self._vector_renderer)
width, height = self._width * self.dpi, self._height * self.dpi
buffer, bounds = self._raster_renderer.tostring_rgba_minimized()
l, b, w, h = bounds
if w > 0 and h > 0:
image = frombuffer(buffer, w, h, True)
image.is_grayscale = False
image.flipud_out()
self._renderer.draw_image(l, height - b - h, image, None)
self._raster_renderer = None
self._rasterizing = False
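# --- Hedged illustration (not part of matplotlib) -----------------------------
# A minimal sketch of the calling protocol described in the docstrings above:
# drawing commands issued between start_rasterizing() and the matching
# stop_rasterizing() go to the Agg renderer and are then pasted back into the
# vector output as a single image. `renderer` is assumed to be a
# MixedModeRenderer; the helper name is illustrative only.
def _rasterized_block_sketch(renderer, draw_calls):
    renderer.start_rasterizing()
    try:
        for draw_call in draw_calls:  # zero-argument drawing callables
            draw_call()
    finally:
        renderer.stop_rasterizing()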
|
agpl-3.0
|
tawsifkhan/scikit-learn
|
sklearn/ensemble/voting_classifier.py
|
178
|
8006
|
"""
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
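# --- Hedged illustration (not part of scikit-learn) ---------------------------
# A minimal sketch of the 'soft' voting rule described in the class docstring:
# average each classifier's class probabilities (optionally weighted) and take
# the argmax, exactly as _predict_proba and predict do above. The helper name
# and inputs are illustrative only.
def _soft_vote_sketch(probas, weights=None):
    """probas has shape (n_classifiers, n_samples, n_classes)."""
    avg = np.average(np.asarray(probas, dtype=float), axis=0, weights=weights)
    return np.argmax(avg, axis=1)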
|
bsd-3-clause
|
agoose77/hivesystem
|
manual/movingpanda/panda-10.py
|
1
|
3997
|
import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import math, functools
from panda3d.core import NodePath
import dragonfly.scene.unbound
import dragonfly.std
import dragonfly.io
import dragonfly.time
import dragonfly.canvas
import Spyder
# ## random matrix generator
from random import random
def random_matrix_generator():
while 1:
a = Spyder.AxisSystem()
a.rotateZ(360 * random())
a.origin = Spyder.Coordinate(15 * random() - 7.5, 15 * random() - 7.5, 0)
yield dragonfly.scene.matrix(a, "AxisSystem")
def id_generator():
n = 0
while 1:
n += 1
yield "spawnedpanda" + str(n)
from dragonfly.canvas import box2d
from bee.mstr import mstr
class parameters: pass
class myscene(dragonfly.pandahive.spyderframe):
a = Spyder.AxisSystem()
a *= 0.25
a.origin += (-8, 42, 0)
env = Spyder.Model3D("models/environment", "egg", a)
a = Spyder.AxisSystem()
a *= 0.005
mypanda = Spyder.Actor3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
entityname="mypanda")
a = Spyder.AxisSystem()
a *= 0.005
pandaclass = Spyder.ActorClass3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
actorclassname="pandaclass")
box = Spyder.Box2D(50, 470, 96, 96)
icon = Spyder.Icon("pandaicon.png", "pandaicon", box, transparency=True)
del a, box
class myhive(dragonfly.pandahive.pandahive):
pandaname = "mypanda"
pandaname_ = bee.attribute("pandaname")
pandaclassname = "pandaclass"
pandaclassname_ = bee.attribute("pandaclassname")
canvas = dragonfly.pandahive.pandacanvas()
mousearea = dragonfly.canvas.mousearea()
raiser = bee.raiser()
connect("evexc", raiser)
animation = dragonfly.scene.unbound.animation()
pandaid = dragonfly.std.variable("id")(pandaname_)
walk = dragonfly.std.variable("str")("walk")
connect(pandaid, animation.actor)
connect(walk, animation.animation_name)
key_w = dragonfly.io.keyboardsensor_trigger("W")
connect(key_w, animation.loop)
key_s = dragonfly.io.keyboardsensor_trigger("S")
connect(key_s, animation.stop)
setPos = dragonfly.scene.unbound.setPos()
connect(pandaid, setPos)
interval = dragonfly.time.interval_time(8)
connect(key_w, interval.start)
connect(key_s, interval.pause)
ip = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (0, -10, 0))
connect(interval.value, ip)
connect(ip, setPos)
connect(key_w, ip.start)
connect(key_s, ip.stop)
pandaspawn = dragonfly.scene.spawn_actor()
v_panda = dragonfly.std.variable("id")(pandaclassname_)
connect(v_panda, pandaspawn)
panda_id = dragonfly.std.generator("id", id_generator)()
random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
connect(panda_id, w_spawn.inp1)
connect(random_matrix, w_spawn.inp2)
do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
connect(w_spawn, do_spawn)
connect(do_spawn, pandaspawn.spawn_matrix)
key_z = dragonfly.io.keyboardsensor_trigger("Z")
connect(key_z, do_spawn)
pandaicon_click = dragonfly.io.mouseareasensor("pandaicon")
connect(pandaicon_click, do_spawn)
myscene = myscene(
scene="scene",
canvas=canvas,
mousearea=mousearea,
)
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
from direct.task import Task
def spinCameraTask(camera, task):
angleDegrees = task.time * 30.0
angleRadians = angleDegrees * (math.pi / 180.0)
camera.setPos(20 * math.sin(angleRadians), -20.0 * math.cos(angleRadians), 3)
camera.setHpr(angleDegrees, 0, 0)
return Task.cont
main.window.taskMgr.add(functools.partial(spinCameraTask, main.window.camera), "SpinCameraTask")
main.run()
|
bsd-2-clause
|
shenzebang/scikit-learn
|
sklearn/cluster/birch.py
|
207
|
22706
|
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row, which is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
next_leaf. Useful only if is_leaf is True, in order to retrieve
the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# the child had to be split, so we need to redistribute the subclusters
# in our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
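# --- Hedged illustration (not part of scikit-learn) ---------------------------
# A minimal sketch of the squared-radius computation used in merge_subcluster
# above: with n samples, squared-norm sum ss and linear sum ls, the mean
# squared distance of the samples to their centroid reduces to
# ss / n - ||ls / n||^2, which BIRCH evaluates without revisiting the samples.
# The helper name and input are illustrative only.
def _sq_radius_sketch(X):
    X = np.asarray(X, dtype=float)
    n_samples = X.shape[0]
    squared_sum = (X ** 2).sum()
    centroid = X.sum(axis=0) / n_samples
    return squared_sum / n_samples - np.dot(centroid, centroid)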
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
reaches the subcluster of the leaf of the tree that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
Maximum number of CF subclusters in each node. If a new sample enters
such that the number of subclusters exceeds the branching_factor, then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince us to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
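# --- Hedged illustration (not part of scikit-learn) ---------------------------
# A minimal sketch of the online protocol documented in partial_fit above:
# feed batches one at a time without a final clustering, then call
# partial_fit() with no data to run only the global clustering step.
# The helper name and batches are illustrative only.
def _birch_partial_fit_sketch(batches, n_clusters=3):
    brc = Birch(n_clusters=None)
    for batch in batches:
        brc.partial_fit(batch)
    brc.set_params(n_clusters=n_clusters)
    return brc.partial_fit()  # X=None: global clustering only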
|
bsd-3-clause
|
d10genes/nyt-nlp
|
nyt-nlp.py
|
1
|
14910
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# {"Title": "Modeling 20 years of scandals with python",
# "Date": "2013-7-4",
# "Category": "ipython",
# "Tags": "nlp, ipython, gensim, pandas",
# "slug": "scandal-modeling-with-python",
# "Author": "Chris"
# }
# <markdowncell>
# #Topic detection with Pandas and Gensim
#
# A few months ago, the [unending](http://en.wikipedia.org/wiki/2013_IRS_scandal
# ) [series](http://thelead.blogs.cnn.com/2013/08/01/exclusive-dozens-of-cia-operatives-on-the-ground-during-benghazi-attack/
# ) of [recent](http://www.usatoday.com/story/news/2013/05/13/justice-department-associated-press-telephone-records/2156521/) [scandals](http://en.wikipedia.org/wiki/2013_mass_surveillance_scandal
# ) inspired me to see whether it would be possible to comb through the text of New York Times articles and automatically detect and identify different scandals that have occurred. I wanted to see whether, given articles about the DOJ, IRS, NSA and all the rest, the text alone would be enough for an algorithm to identify them as distinct scandals and distinguish them from one another, in an unsupervised fashion.
#
# This also gave me an excuse to explore [gensim](http://radimrehurek.com/gensim/) and show off some of [pandas](http://pandas.pydata.org/)' capabilities for data-wrangling.
#
# The IPython notebook for this post is available at [this repo](https://github.com/d10genes/nyt-nlp) (and I grabbed the ggplot-esque [plot settings](http://matplotlib.org/users/customizing.html) from [Probabilistic Programming for Hackers](https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/tree/master/styles)).
#
# Let's get started by picking up where we left off scraping the data in [part 1](|filename|nyt-scraping.ipynb), and pulling all those articles out of mongo.
# <codecell>
from __future__ import division
import json
import pandas as pd
import numpy as np
from time import sleep
import itertools
import pymongo
import re
from operator import itemgetter
from gensim import corpora, models, similarities
import gensim
from collections import Counter
import datetime as dt
import matplotlib.pyplot as plt
pd.options.display.max_columns = 30
pd.options.display.notebook_repr_html = False
# <codecell>
%load_ext autosave
%autosave 30
# <markdowncell>
# ##Init
# <codecell>
connection = pymongo.Connection("localhost", 27017)
db = connection.nyt
# <codecell>
raw = list(db.raw_text.find({'text': {'$exists': True}}))
# <markdowncell>
# I ran this the first time, to make sure it doesn't choke on title-less documents (there should be code to fix it in pt. 1 now, though):
#
# for dct in raw:
# if 'title' not in dct:
# dct['title'] = ''
# <markdowncell>
# ###Some helpful functions
# The `format` function should be pretty self-explanatory, and `search` is to be used later on to verify topic words.
# <codecell>
def format(txt):
"""Turns a text document to a list of formatted words.
Get rid of possessives, special characters, multiple spaces, etc.
"""
tt = re.sub(r"'s\b", '', txt).lower() #possessives
tt = re.sub(r'[\.\,\;\:\'\"\(\)\&\%\*\+\[\]\=\?\!/]', '', tt) #weird stuff
tt = re.sub(r' *\$[0-9]\S* ?', ' <money> ', tt) #dollar amounts
tt = re.sub(r' *[0-9]\S* ?', ' <num> ', tt)
tt = re.sub(r'[\-\s]+', ' ', tt) #hyphen -> space
tt = re.sub(r' [a-z] ', ' ', tt) # single letter -> space
return tt.strip().split()
def search(wrd, df=True):
"""Searches through `raw` list of documents for term `wrd` (case-insensitive).
Returns titles and dates of matching articles, sorted by date. Returns
DataFrame by default.
"""
wrd = wrd.lower()
_srch = lambda x: wrd in x['text'].lower()
title_yr = ((b['title'], b['date'].year) for b in filter(_srch, raw))
ret = sorted(title_yr, key=itemgetter(1))
return pd.DataFrame(ret, columns=['Title', 'Year']) if df else ret
dmap = lambda dct, a: [dct[e] for e in a]
# <markdowncell>
# ##Model generation
# Now apply the `format` function to all the text, and convert it to the dictionary and per-document word-count (bag-of-words) form that gensim's models can work with. The `TfidfModel` transformation will [take into account](http://en.wikipedia.org/wiki/Tf–idf) how common a word is in a certain document compared to how common it is overall (so the algorithm won't just be looking at the most common but uninformative words like *the* or *and*).
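#
# Roughly (a sketch of the standard weighting; gensim's default is this form with base-2 logs and L2-normalized document vectors), the weight of term $t$ in document $d$ from a corpus of $N$ documents is
#
# $$w_{t,d} = \mathrm{tf}_{t,d} \cdot \log_2\frac{N}{\mathrm{df}_t},$$
#
# where $\mathrm{tf}_{t,d}$ counts occurrences of $t$ in $d$ and $\mathrm{df}_t$ is the number of documents containing $t$.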
# <codecell>
texts = [format(doc['text']) for doc in raw]
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
tfidf = models.TfidfModel(corpus)
tcorpus = dmap(tfidf, corpus)
# <codecell>
np.random.seed(42)
model = models.lsimodel.LsiModel(corpus=tcorpus, id2word=dictionary, num_topics=15)
# <markdowncell>
# ##Analysis
# <markdowncell>
# As far as I know, the only way to get the topic information for each article after fitting the model is looping through and manually grabbing the topic-score list for each article.
# <codecell>
_kwargs = dict(formatted=0, num_words=20)
topic_words = [[w for _, w in tups] for tups in model.show_topics(**_kwargs)]
# <codecell>
%%time
it = itertools.izip(corpus, ((d['title'], d['date']) for d in raw))
topic_data = [] #for each article, collect topic with highest score
_topic_stats = [] # gets all topic-score pairs for each document
for corp_txt, (tit, date) in it:
_srtd = sorted(model[corp_txt], key=itemgetter(1), reverse=1)
top, score = _srtd[0]
topic_data.append((tit, date, top, score))
_topic_stats.append(_srtd)
topic_stats = [tup for tups in _topic_stats for tup in tups] #flatten list(tuples) -> list
# <markdowncell>
# The `topic_data` and `_topic_stats` lists keep data on each article and sorted lists of topic-score tuples:
# <codecell>
print topic_data[0]
print _topic_stats[0]
# <markdowncell>
# Now we can put the topic information into pandas, for faster, easier analysis.
# <codecell>
df = pd.DataFrame(topic_data, columns=['Title', 'Date', 'Topic', 'Score'])
df.Date = df.Date.map(lambda d: d.date())
print df.shape
df.head()
# <markdowncell>
# By plotting the distribution of topic labels for each document, we can now see that the detected topics are not very evenly distributed.
# <codecell>
vc = df.Topic.value_counts()
plt.bar(vc.index, vc)
_ = plt.ylabel('Topic count')
# df.Topic.value_counts() #This would give the actual frequency values
# <markdowncell>
# One high level question I had was if certain topics can be seen varying in frequency over time. Pandas' `groupby` can be used to aggregate the article counts by year and topic:
# <codecell>
year = lambda x: x.year
sz = df.set_index('Date').groupby(['Topic', year]).size()
sz.index.names, sz.name = ['Topic', 'Year'], 'Count'
sz = sz.reset_index()
sz.head()
# <markdowncell>
# which can then be reshaped with `pivot`, giving us a Year $\times$ Topic grid:
# <codecell>
top_year = sz.pivot(index='Year', columns='Topic', values='Count').fillna(0)
top_year
# <markdowncell>
# In Pandas land it's easy to find lots of basic information about the distribution--a simple boxplot will give us a good idea of the min/median/max number of times a topic was represented over the 21 years.
# <codecell>
plt.figure(figsize=(12, 8))
top_year.boxplot() and None
# <markdowncell>
# Topics 8, 9, 11 and 12 hardly show up, while in typical years topics like 1 and 2 are heavily represented. The plot also shows that articles most closely associated with topic 2 actually show up 250 times for one year.
#
# (For the curious, viewing the distribution of scandalous articles across topics for each year is as easy as `top_year.T.boxplot()`.)
#
# The `plot` method can automatically plot each column as a separate time series, which can give a view of the trend for each scandal-topic:
# <codecell>
_ = top_year.plot(figsize=(12, 8))
# <markdowncell>
# The number of times articles with different topics show up in a year varies a lot for most of the topics. It even looks like there are a few years, like 1998 and 2006, where multiple topics spike. Plotting the sum of articles for all topics in a given year
# can verify this:
# <codecell>
_ = top_year.sum(axis=1).plot()
# <markdowncell>
# ###Topic words
# Now it's time to look at the words that the model associated with each topic, to see if it's possible to infer what each detected topic is about. Stacking all the words of the topics and getting the value counts gives an idea of how often certain words show up among the topics:
# <codecell>
pd.options.display.max_rows = 22
top_wds_df = pd.DataFrame(zip(*topic_words))
vc = top_wds_df.stack(0).value_counts()
print vc
pd.options.display.max_rows = 400
# <markdowncell>
# It looks like the most common topic words are *page*, *enron*, *bush* and *clinton*, with *gore* just behind. It seems these words might be less helpful at finding the meaning of topics since they're closely associated with practically every topic of political scandal in the past two decades. It shouldn't be surprising that presidents show up among the most common topic words, and a cursory look at articles with the word *page* (using `search`, defined above) makes it look like the word shows up both for sexual scandals involving pages and in a bunch of references to *front page scandals* or the *op-ed page*.
#
# You can find specific headlines from my dataset that include the word *page* (which [duckduckgo](duckduckgo.com) should be able to handle) with `search('page')`.
#
# In the following, I've given a simple score to the topic words based on how unique they are (from a low score of 0 for the most common, up to 11 for words that only appear for a single topic). All 15 topics are summarized below with the top words scored by how common they are.
#
# Topics 1 and 6 look to have the most cliched scandal words, while the last few topics are characterized by quite a few unique words.
# <codecell>
pd.options.display.line_width = 130
top_wd_freq = {w: '{}-{}'.format(w, vc.max() - cnt) for w, cnt in vc.iteritems()}
top_wds_df.apply(lambda s: s.map(top_wd_freq))
# <markdowncell>
# ##Story telling
# <markdowncell>
# Now comes the fun part, where we can try to find explanations for the choices of topics generated by Gensim's implementation of [LSI](http://en.wikipedia.org/wiki/Latent_semantic_indexing). While LSI can be very good at finding hidden factors and relationships (i.e., topics) from different documents, there is no way that I'm aware of to easily interpret the algorithm to see why it groups documents with certain topics. The best way I know is to eyeball it, which we can do from the topic-word dataframe above.
#
# For example, topics 1 and 5 include the words *impeachment, lewinsky, gore, clinton* and *starr*, so it's probably a safe bet to say they're referring to the [Lewinsky scandal](http://en.wikipedia.org/wiki/Lewinsky_scandal). And looking at the topic-year plot from above (`In [17]`), we can see that at least topic 5 has a major spike in the years following the scandal.
#
# Both also include the rather high-scoring terms *prime* and *minister*, which are probably indicative of the large number of world news summaries included under the topics. For example, 343 of Topic 1's articles have the title *News Summary*, while no other topic has even 40 summaries:
# <codecell>
t1 = df[df.Topic == 1].Title.value_counts()
t1[t1 > 10]
# <markdowncell>
# Topic 3 looks like it's associated with state- and city-level scandals in the New England region. Aside from the cliched terms, we have *rowland* and *rell*, likely in reference to [corruption in Connecticut](http://en.wikipedia.org/wiki/John_G._Rowland#Corruption_as_Governor), and some more pretty specific indicators like *mayor, governor, cuomo, spitzer, city, state* and *albany*.
#
# Topic 12 looks like it covers New Jersey pretty well. Other than the state's name itself as one of the topic words, you've got [Corzine](en.wikipedia.org/wiki/Jon_Corzine), [Torricelli](http://en.wikipedia.org/wiki/Robert_Torricelli), [Codey](http://en.wikipedia.org/wiki/Richard_Codey), [Schundler](http://en.wikipedia.org/wiki/Bret_Schundler) and [Lautenberg](http://en.wikipedia.org/wiki/Frank_Lautenberg), none of which appear outside of this topic except for *Corzine*.
#
# Several look international in nature, especially topic 9, which has strong Italian (*berlusconi, italy, italian* and the unique *andreotti* terms) and Japanese (*japan, japanese, [Ozawa](http://en.wikipedia.org/wiki/Ichir%C5%8D_Ozawa), [Hosokawa](http://en.wikipedia.org/wiki/Morihiro_Hosokawa)* and *[Kanemaru](http://en.wikipedia.org/wiki/Shin_Kanemaru)*) showings, and also uniquely identifies German chancellor [Helmut Kohl](http://en.wikipedia.org/wiki/Helmut_Kohl).
#
# Topic 13 seems to represent public finance scandals, with unique terms *budget, tax, percent, billion* and *plan*, while topic 8 looks like it pertains more to campaign finance, with unique terms *soft, money, raising, earmarks* and *lobbyists*. Topic 7 looks like it has to do with corporate scandals, leading with the admittedly pervasive *enron* term, but with largely unique terms *accounting, stock, corporate, attorney, counsel, investigation, companies* and *justice* [as in Department of...?] as well.
#
# And finally the 2nd topic appears to have a lot of legislative factors in it, with unique terms *house, senate, lawmakers, ethics, committee, bill* and *parliament*.
#
# ##Conclusion
#
# The results give a much less fine-grained view of scandals than I was expecting, either because of the sources (not enough articles devoted specifically enough to particular scandals? text not sufficiently preprocessed?) or the algorithm (wrong algorithm for the task? wrong settings?). Plus, it turns out there have been a *lot* of American political scandals in the last 20 years. Perhaps clearer patterns could be discerned by expanding the number of topics.
#
# The detected topics seem to have a lot of noise (for example, the presidents' names show up as key words in *every* topic), possibly due to the imbalance from some scandals being cited more frequently than others. But when you cut out the noise and try to characterize the topics by the more infrequent key words, I was surprised by the topic clusters it was actually able to detect, from international scandals to corporate scandals to Jersey scandals. I was unfortunately not able to detect the recent set of scandals, but from the experiment, the good scandals seem to require a few years to age before there is enough data to detect them. Hopefully today's events will be easy enough to spot by rerunning this in a few months or years.
#
# All in all, it was a fun exercise and a good reminder of the strong tradition of corruption we're part of.
|
bsd-3-clause
|
arlewis/galaxy_cutouts
|
versions/extract_stamp_good_wt_rrhr_ims.py
|
2
|
22185
|
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
import os
import numpy as np
import montage_wrapper as montage
import shutil
import sys
import glob
import time
from matplotlib.path import Path
from scipy.ndimage import zoom
from pdb import set_trace
_TOP_DIR = '/data/tycho/0/leroy.42/allsky/'
_INDEX_DIR = os.path.join(_TOP_DIR, 'code/')
_HOME_DIR = '/n/home00/lewis.1590/research/galbase_allsky/'
_MOSAIC_DIR = os.path.join(_HOME_DIR, 'cutouts')
def calc_tile_overlap(ra_ctr, dec_ctr, pad=0.0, min_ra=0., max_ra=180., min_dec=-90., max_dec=90.):
overlap = ((min_dec - pad) < dec_ctr) & ((max_dec + pad) > dec_ctr)
#TRAP HIGH LATITUDE CASE AND (I GUESS) TOSS BACK ALL TILES. DO BETTER LATER
mean_dec = (min_dec + max_dec) * 0.5
if np.abs(dec_ctr) + pad > 88.0:
return overlap
ra_pad = pad / np.cos(np.radians(mean_dec))
# MERIDIAN CASES
merid = np.where(max_ra < min_ra)
overlap[merid] = overlap[merid] & ( ((min_ra-ra_pad) < ra_ctr) | ((max_ra+ra_pad) > ra_ctr) )[merid]
# BORING CASE
normal = np.where(max_ra > min_ra)
overlap[normal] = overlap[normal] & ((((min_ra-ra_pad) < ra_ctr) & ((max_ra+ra_pad) > ra_ctr)))[normal]
return overlap
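# Example with made-up numbers: a tile whose index lists min_ra=350 and
# max_ra=10 wraps the meridian (max_ra < min_ra), so the meridian branch above
# flags a target at ra_ctr=355 deg as overlapping:
#   calc_tile_overlap(355., 0., pad=1., min_ra=np.array([350.]),
#                     max_ra=np.array([10.]), min_dec=np.array([-5.]),
#                     max_dec=np.array([5.]))   # -> array([ True])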
def make_axes(hdr, quiet=False, novec=False, vonly=False, simple=False):
# PULL THE IMAGE/CUBE SIZES FROM THE HEADER
naxis = hdr['NAXIS']
naxis1 = hdr['NAXIS1']
naxis2 = hdr['NAXIS2']
if naxis > 2:
naxis3 = hdr['NAXIS3']
## EXTRACT FITS ASTROMETRY STRUCTURE
ww = pywcs.WCS(hdr)
#IF DATASET IS A CUBE THEN WE MAKE THE THIRD AXIS IN THE SIMPLEST WAY POSSIBLE (NO COMPLICATED ASTROMETRY WORRIES FOR FREQUENCY INFORMATION)
if naxis > 3:
#GRAB THE RELEVANT INFORMATION FROM THE ASTROMETRY HEADER
cd = ww.wcs.cd
crpix = ww.wcs.crpix
cdelt = ww.wcs.cdelt
crval = ww.wcs.crval
if naxis > 2:
# MAKE THE VELOCITY AXIS (WILL BE M/S)
v = np.arange(naxis3) * 1.0
vdif = v - (hdr['CRPIX3']-1)
vaxis = (vdif * hdr['CDELT3'] + hdr['CRVAL3'])
# CUT OUT HERE IF WE ONLY WANT VELOCITY INFO
if vonly:
return vaxis
#IF 'SIMPLE' IS CALLED THEN DO THE REALLY TRIVIAL THING:
if simple:
print('Using simple approach to make axes.')
print('BE SURE THIS IS WHAT YOU WANT! It probably is not.')
raxis = np.arange(naxis1) * 1.0
rdif = raxis - (hdr['CRPIX1'] - 1)
raxis = (rdif * hdr['CDELT1'] + hdr['CRVAL1'])
daxis = np.arange(naxis2) * 1.0
ddif = daxis - (hdr['CRPIX2'] - 1)
daxis = (ddif * hdr['CDELT2'] + hdr['CRVAL2'])
rimg = np.outer(raxis, np.ones(naxis2))  # IDL: raxis # (fltarr(naxis2) + 1.)
dimg = np.outer(np.ones(naxis1), daxis)  # IDL: (fltarr(naxis1) + 1.) # daxis
return rimg, dimg
# OBNOXIOUS SFL/GLS THING
glspos = ww.wcs.ctype[0].find('GLS')
if glspos != -1:
ctstr = ww.wcs.ctype[0]
newtype = 'SFL'
ctstr = ctstr.replace('GLS', 'SFL')
ww.wcs.ctype[0] = ctstr
print('Replaced GLS with SFL; CTYPE1 now =' + ww.wcs.ctype[0])
glspos = ww.wcs.ctype[1].find('GLS')
if glspos != -1:
ctstr = ww.wcs.ctype[1]
newtype = 'SFL'
ctstr = ctstr.replace('GLS', 'SFL')
ww.wcs.ctype[1] = ctstr
print('Replaced GLS with SFL; CTYPE2 now = ' + ww.wcs.ctype[1])
# CALL 'xy2ad' TO FIND THE RA AND DEC FOR EVERY POINT IN THE IMAGE
if novec:
rimg = np.zeros((naxis1, naxis2))
dimg = np.zeros((naxis1, naxis2))
for i in range(naxis1):
j = np.arange(naxis2, dtype=float)
pixcrd = np.array(zip([float(i)] * naxis2, j), np.float_)
world = ww.all_pix2world(pixcrd, 1)  # (naxis2, 2) array of (ra, dec)
rimg[i, :] = world[:, 0]
dimg[i, :] = world[:, 1]
else:
ximg = np.arange(naxis1) * 1.0
yimg = np.arange(naxis1) * 1.0  # note: assumes naxis1 == naxis2 (square images)
X, Y = np.meshgrid(ximg, yimg, indexing='xy')
ss = X.shape
xx, yy = X.flatten(), Y.flatten()
pixcrd = np.array(zip(xx, yy), np.float_)
img_new = ww.all_pix2world(pixcrd, 0)
rimg_new, dimg_new = img_new[:,0], img_new[:,1]
rimg = rimg_new.reshape(ss)
dimg = dimg_new.reshape(ss)
# GET AXES FROM THE IMAGES. USE THE CENTRAL COLUMN AND CENTRAL ROW
raxis = np.squeeze(rimg[:, naxis2/2])
daxis = np.squeeze(dimg[naxis1/2, :])
return rimg, dimg
def write_headerfile(header_file, header):
f = open(header_file, 'w')
for iii in range(len(header)):
outline = str(header[iii:iii+1]).strip().rstrip('END').strip()+'\n'
f.write(outline)
f.close()
def create_hdr(ra_ctr, dec_ctr, pix_len, pix_scale):
hdr = pyfits.Header()
hdr['NAXIS'] = 2
hdr['NAXIS1'] = pix_len
hdr['NAXIS2'] = pix_len
hdr['CTYPE1'] = 'RA---TAN'
hdr['CRVAL1'] = float(ra_ctr)
hdr['CRPIX1'] = (pix_len / 2.) * 1.
hdr['CDELT1'] = -1.0 * pix_scale
hdr['CTYPE2'] = 'DEC--TAN'
hdr['CRVAL2'] = float(dec_ctr)
hdr['CRPIX2'] = (pix_len / 2.) * 1.
hdr['CDELT2'] = pix_scale
hdr['EQUINOX'] = 2000
return hdr
def unwise(band=None, ra_ctr=None, dec_ctr=None, size_deg=None, index=None, name=None):
tel = 'unwise'
data_dir = os.path.join(_TOP_DIR, tel, 'sorted_tiles')
# READ THE INDEX FILE (IF NOT PASSED IN)
if index is None:
indexfile = os.path.join(_INDEX_DIR, tel + '_index_file.fits')
ext = 1
index, hdr = pyfits.getdata(indexfile, ext, header=True)
# CALIBRATION TO GO FROM VEGAS TO ABMAG
w1_vtoab = 2.683
w2_vtoab = 3.319
w3_vtoab = 5.242
w4_vtoab = 6.604
# NORMALIZATION OF UNITY IN VEGAS MAG
norm_mag = 22.5
pix_as = 2.75 #arcseconds - native detector pixel size wise docs
# COUNTS TO JY CONVERSION
w1_to_mjysr = counts2jy(norm_mag, w1_vtoab, pix_as)
w2_to_mjysr = counts2jy(norm_mag, w2_vtoab, pix_as)
w3_to_mjysr = counts2jy(norm_mag, w3_vtoab, pix_as)
w4_to_mjysr = counts2jy(norm_mag, w4_vtoab, pix_as)
# MAKE A HEADER
pix_scale = 2.0 / 3600. # 2.0 arbitrary
pix_len = size_deg / pix_scale
# this should automatically populate SIMPLE and NAXIS keywords
target_hdr = create_hdr(ra_ctr, dec_ctr, pix_len, pix_scale)
# CALCULATE TILE OVERLAP
tile_overlaps = calc_tile_overlap(ra_ctr, dec_ctr, pad=size_deg,
min_ra=index['MIN_RA'],
max_ra=index['MAX_RA'],
min_dec=index['MIN_DEC'],
max_dec=index['MAX_DEC'])
# FIND OVERLAPPING TILES WITH RIGHT BAND
# index file set up such that index['BAND'] = 1, 2, 3, 4 depending on wise band
ind = np.where((index['BAND'] == band) & tile_overlaps)
ct_overlap = len(ind[0])
# SET UP THE OUTPUT
ri_targ, di_targ = make_axes(target_hdr)
sz_out = ri_targ.shape
outim = ri_targ * np.nan
# LOOP OVER OVERLAPPING TILES AND STITCH ONTO TARGET HEADER
for ii in range(0, ct_overlap):
infile = os.path.join(data_dir, index[ind[0][ii]]['FNAME'])
im, hdr = pyfits.getdata(infile, header=True)
ri, di = make_axes(hdr)
hh = pywcs.WCS(target_hdr)
x, y = hh.all_world2pix(ri, di, 1)
in_image = (x > 0) & (x < (sz_out[0] - 1)) & (y > 0) & (y < (sz_out[1] - 1))
if np.sum(in_image) == 0:
print("No overlap. Proceeding.")
continue
if band == 1:
im *= w1_to_mjysr
if band == 2:
im *= w2_to_mjysr
if band == 3:
im *= w3_to_mjysr
if band == 4:
im *= w4_to_mjysr
target_hdr['BUNIT'] = 'MJY/SR'
newimfile = reprojection(infile, im, hdr, target_hdr, data_dir)
im, new_hdr = pyfits.getdata(newimfile, header=True)
useful = np.where(np.isfinite(im))
outim[useful] = im[useful]
return outim, target_hdr
def counts2jy(norm_mag, calibration_value, pix_as):
# convert counts to Jy
val = 10.**((norm_mag + calibration_value) / -2.5)
val *= 3631.0
# then to MJy
val /= 1e6
# then to MJy/sr
val /= np.radians(pix_as / 3600.)**2
return val
def galex(band='fuv', ra_ctr=None, dec_ctr=None, size_deg=None, index=None, name=None, write_info=True):
tel = 'galex'
data_dir = os.path.join(_TOP_DIR, tel, 'sorted_tiles')
problem_file = os.path.join(_HOME_DIR, 'problem_galaxies.txt')
#numbers_file = os.path.join(_HOME_DIR, 'number_of_tiles_per_galaxy.dat')
bg_reg_file = os.path.join(_HOME_DIR, 'galex_reprojected_bg.reg')
numbers_file = os.path.join(_HOME_DIR, 'gal_reproj_info.dat')
galaxy_mosaic_file = os.path.join(_MOSAIC_DIR, '_'.join([name, band]).upper() + '.FITS')
start_time = time.time()
#if not os.path.exists(galaxy_mosaic_file):
if name == 'NGC2976':
print name
# READ THE INDEX FILE (IF NOT PASSED IN)
if index is None:
indexfile = os.path.join(_INDEX_DIR, tel + '_index_file.fits')
ext = 1
index, hdr = pyfits.getdata(indexfile, ext, header=True)
# CALIBRATION FROM COUNTS TO ABMAG
fuv_toab = 18.82
nuv_toab = 20.08
# PIXEL SCALE IN ARCSECONDS
pix_as = 1.5 # galex pixel scale -- from galex docs
# MAKE A HEADER
pix_scale = 1.5 / 3600. # 1.5 arbitrary: how should I set it?
pix_len = size_deg / pix_scale
target_hdr = create_hdr(ra_ctr, dec_ctr, pix_len, pix_scale)
# CALCULATE TILE OVERLAP
tile_overlaps = calc_tile_overlap(ra_ctr, dec_ctr, pad=size_deg,
min_ra=index['MIN_RA'],
max_ra=index['MAX_RA'],
min_dec=index['MIN_DEC'],
max_dec=index['MAX_DEC'])
# FIND OVERLAPPING TILES WITH RIGHT BAND
# index file set up such that index['fuv'] = 1 where fuv and
# index['nuv'] = 1 where nuv
ind = np.where((index[band]) & tile_overlaps)
ct_overlap = len(ind[0])
# MAKE SURE THERE ARE OVERLAPPING TILES
if ct_overlap == 0:
if write_info:
with open(problem_file, 'a') as myfile:
myfile.write(name + ': ' + 'No overlapping tiles\n')
return
# SET UP THE OUTPUT
ri_targ, di_targ = make_axes(target_hdr)
sz_out = ri_targ.shape
outim = ri_targ * np.nan
prihdu = pyfits.PrimaryHDU(data=outim, header=target_hdr)
target_hdr = prihdu.header
# GATHER THE INPUT FILES
infiles = index[ind[0]]['fname']
wtfiles = index[ind[0]]['rrhrfile']
flgfiles = index[ind[0]]['flagfile']
infiles = [os.path.join(data_dir, f) for f in infiles]
wtfiles = [os.path.join(data_dir, f) for f in wtfiles]
flgfiles = [os.path.join(data_dir, f) for f in flgfiles]
# CREATE NEW TEMP DIRECTORY TO STORE TEMPORARY FILES
gal_dir = os.path.join(_HOME_DIR, name)
os.makedirs(gal_dir)
# CREATE SUBDIRECTORIES INSIDE TEMP DIRECTORY FOR ALL TEMP FILES
input_dir = os.path.join(gal_dir, 'input')
reprojected_dir = os.path.join(gal_dir, 'reprojected')
weights_dir = os.path.join(gal_dir, 'weights')
weighted_dir = os.path.join(gal_dir, 'weighted')
final_dir = os.path.join(gal_dir, 'mosaic')
for indir in [input_dir, reprojected_dir, weights_dir, weighted_dir, final_dir]:
os.makedirs(indir)
# SYMLINK ORIGINAL RRHR FILES TO TEMPORARY INPUT DIRECTORY
for wtfile in wtfiles:
basename = os.path.basename(wtfile)
new_wt_file = os.path.join(input_dir, basename)
os.symlink(wtfile, new_wt_file)
for flgfile in flgfiles:
basename = os.path.basename(flgfile)
new_flg_file = os.path.join(input_dir, basename)
os.symlink(flgfile, new_flg_file)
# CONVERT INT FILES TO MJY/SR AND WRITE NEW FILES INTO TEMP DIR
# CONVERT WT FILES TO WT/SR AND WRITE NEW FILES INTO TEMP DIR
int_outfiles = [os.path.join(input_dir, f.split('/')[-1].replace('.fits', '_mjysr.fits')) for f in infiles]
wt_outfiles = [os.path.join(input_dir, f.split('/')[-1].replace('.fits', '_sr.fits')) for f in wtfiles]
for i in range(len(infiles)):
im, hdr = pyfits.getdata(infiles[i], header=True)
wt, whdr = pyfits.getdata(wtfiles[i], header=True)
#wt = wtpersr(wt, pix_as)
if band.lower() == 'fuv':
im = counts2jy_galex(im, fuv_toab, pix_as)
if band.lower() == 'nuv':
im = counts2jy_galex(im, nuv_toab, pix_as)
if not os.path.exists(int_outfiles[i]):
pyfits.writeto(int_outfiles[i], im, hdr)
#pyfits.writeto(wt_outfiles[i], wt, whdr)
# APPEND UNIT INFORMATION TO THE NEW HEADER
target_hdr['BUNIT'] = 'MJY/SR'
# WRITE OUT A HEADER FILE
hdr_file = os.path.join(gal_dir, name + '_template.hdr')
write_headerfile(hdr_file, target_hdr)
# PERFORM THE REPROJECTION, WEIGHTING, AND EXTRACTION
#try:
# REPROJECT INPUT IMAGES (-int and -rrhr)
int_suff, rrhr_suff, flag_suff = '*_mjysr.fits', '*-rrhr.fits', '*-flags.fits'
int_images = sorted(glob.glob(os.path.join(input_dir, int_suff)))
rrhr_images = sorted(glob.glob(os.path.join(input_dir, rrhr_suff)))
flag_images = sorted(glob.glob(os.path.join(input_dir, flag_suff)))
reproject_images(hdr_file, int_images, rrhr_images, flag_images, input_dir, reprojected_dir)
# WEIGHT IMAGES
im_suff, wt_suff = '*_mjysr_masked.fits', '*-rrhr_masked.fits'
imfiles = sorted(glob.glob(os.path.join(reprojected_dir, im_suff)))
wtfiles = sorted(glob.glob(os.path.join(reprojected_dir, wt_suff)))
weight_images(imfiles, wtfiles, weighted_dir, weights_dir)
# CREATE THE METADATA TABLES NEEDED FOR COADDITION
tables = create_tables(weights_dir, weighted_dir)
# COADD THE REPROJECTED, WEIGHTED IMAGES AND THE WEIGHT IMAGES
coadd(hdr_file, final_dir, weights_dir, weighted_dir)
# DIVIDE OUT THE WEIGHTS
imagefile = finish_weight(final_dir)
# SUBTRACT OUT THE BACKGROUND
remove_background(final_dir, imagefile, bg_reg_file)
# COPY MOSAIC FILE TO CUTOUTS DIRECTORY
mosaic_file = os.path.join(final_dir, 'final_mosaic.fits')
newfile = '_'.join([name, band]).upper() + '.FITS'
new_mosaic_file = os.path.join(_MOSAIC_DIR, newfile)
shutil.copy(mosaic_file, new_mosaic_file)
# REMOVE GALAXY DIRECTORY AND EXTRA FILES
#shutil.rmtree(gal_dir)
stop_time = time.time()
total_time = (stop_time - start_time) / 60.
# WRITE OUT THE NUMBER OF TILES THAT OVERLAP THE GIVEN GALAXY
if write_info:
out_arr = [name, len(infiles), np.around(total_time,2)]
with open(numbers_file, 'a') as nfile:
nfile.write('{0: >10}'.format(out_arr[0]))
nfile.write('{0: >6}'.format(out_arr[1]))
nfile.write('{0: >6}'.format(out_arr[2]) + '\n')
#nfile.write(name + ': ' + str(len(infiles)) + '\n')
# SOMETHING WENT WRONG
#except Exception as inst:
# me = sys.exc_info()[0]
# if write_info:
# with open(problem_file, 'a') as myfile:
# myfile.write(name + ': ' + str(me) + ': '+str(inst)+'\n')
# shutil.rmtree(gal_dir)
return
def counts2jy_galex(counts, cal, pix_as):
# first convert to abmag
abmag = -2.5 * np.log10(counts) + cal
# then convert to Jy
f_nu = 10**(abmag/-2.5) * 3631.
# then to MJy
f_nu *= 1e-6
# then to MJy/sr
val = f_nu / (np.radians(pix_as/3600))**2
return val
#val = flux / MJYSR2JYARCSEC / pixel_area / 1e-23 / C * FUV_LAMBDA**2
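# Equivalent closed form of the conversion chain above (same arithmetic, collapsed):
#   val = counts * 10**(-cal / 2.5) * 3631e-6 / np.radians(pix_as / 3600.)**2   # MJy/sr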
def wtpersr(wt, pix_as):
return wt / (np.radians(pix_as/3600))**2
def mask_galex(intfile, wtfile, flagfile, outfile=None, chip_rad = 1400, chip_x0=1920, chip_y0=1920, out_intfile=None, out_wtfile=None):
if out_intfile is None:
out_intfile = intfile.replace('.fits', '_masked.fits')
if out_wtfile is None:
out_wtfile = wtfile.replace('.fits', '_masked.fits')
if not os.path.exists(out_intfile):
data, hdr = pyfits.getdata(intfile, header=True)
wt, whdr = pyfits.getdata(wtfile, header=True)
flag, fhdr = pyfits.getdata(flagfile, header=True)
factor = float(len(data)) / len(flag)
upflag = zoom(flag, factor, order=0)
# chip_x0, chip_y0 = hdr['CRPIX1'], hdr['CRPIX2']
x = np.arange(data.shape[1]).reshape(1, -1) + 1
y = np.arange(data.shape[0]).reshape(-1, 1) + 1
r = np.sqrt((x - chip_x0)**2 + (y - chip_y0)**2)
i = (r > chip_rad) #| (data == 0)
data = np.where(i, 0, data)
wt = np.where(i, 1e-20, wt)
pyfits.writeto(out_intfile, data, hdr)
pyfits.writeto(out_wtfile, wt, whdr)
def reproject_images(template_header, int_images, rrhr_images, flag_images, input_dir, reprojected_dir, whole=False, exact=True):
# MASK IMAGES
for i in range(len(int_images)):
image_infile = int_images[i]
wt_infile = rrhr_images[i]
flg_infile = flag_images[i]
image_outfile = os.path.join(input_dir, os.path.basename(image_infile).replace('.fits', '_masked.fits'))
wt_outfile = os.path.join(input_dir, os.path.basename(wt_infile).replace('.fits', '_masked.fits'))
mask_galex(image_infile, wt_infile, flg_infile, out_intfile=image_outfile, out_wtfile=wt_outfile)
# REPROJECT IMAGES
input_table = os.path.join(input_dir, 'input.tbl')
montage.mImgtbl(input_dir, input_table, corners=True)
# Create reprojection directory, reproject, and get image metadata
stats_table = os.path.join(reprojected_dir, 'mProjExec_stats.log')
montage.mProjExec(input_table, template_header, reprojected_dir, stats_table, raw_dir=input_dir, whole=whole, exact=exact)
reprojected_table = os.path.join(reprojected_dir, 'reprojected.tbl')
montage.mImgtbl(reprojected_dir, reprojected_table, corners=True)
def weight_images(imfiles, wtfiles, weighted_dir, weights_dir):
for i in range(len(imfiles)):
imfile = imfiles[i]
wtfile = wtfiles[i]
im, hdr = pyfits.getdata(imfile, header=True)
rrhr, rrhrhdr = pyfits.getdata(wtfile, header=True)
wt = rrhr
newim = im * wt
nf = imfiles[i].split('/')[-1].replace('.fits', '_weighted.fits')
newfile = os.path.join(weighted_dir, nf)
pyfits.writeto(newfile, newim, hdr)
old_area_file = imfiles[i].replace('.fits', '_area.fits')
new_area_file = newfile.replace('.fits', '_area.fits')
shutil.copy(old_area_file, new_area_file)
nf = wtfiles[i].split('/')[-1].replace('.fits', '_weights.fits')
weightfile = os.path.join(weights_dir, nf)
pyfits.writeto(weightfile, wt, rrhrhdr)
old_area_file = wtfiles[i].replace('.fits', '_area.fits')
new_area_file = weightfile.replace('.fits', '_area.fits')
shutil.copy(old_area_file, new_area_file)
def create_tables(weights_dir, weighted_dir):
return_tables = []
in_dir = weights_dir
reprojected_table = os.path.join(in_dir, 'weights_reprojected.tbl')
montage.mImgtbl(in_dir, reprojected_table, corners=True)
return_tables.append(reprojected_table)
in_dir = weighted_dir
reprojected_table = os.path.join(in_dir, 'int_reprojected.tbl')
montage.mImgtbl(in_dir, reprojected_table, corners=True)
return_tables.append(reprojected_table)
return return_tables
def coadd(template_header, output_dir, weights_dir, weighted_dir):
img_dirs = [weights_dir, weighted_dir]
outputs = ['weights', 'int']
for img_dir, output in zip(img_dirs, outputs):
reprojected_table = os.path.join(img_dir, output + '_reprojected.tbl')
out_image = os.path.join(output_dir, output + '_mosaic.fits')
montage.mAdd(reprojected_table, template_header, out_image, img_dir=img_dir, exact=True)
def finish_weight(output_dir):
image_file = os.path.join(output_dir, 'int_mosaic.fits')
wt_file = os.path.join(output_dir, 'weights_mosaic.fits')
im, hdr = pyfits.getdata(image_file, header=True)
wt, wthdr = pyfits.getdata(wt_file, header=True)
newim = im / wt
newfile = os.path.join(output_dir, 'image_mosaic.fits')
pyfits.writeto(newfile, newim, hdr)
return newfile
def remove_background(final_dir, imfile, bgfile):
data, hdr = pyfits.getdata(imfile, header=True)
box_inds = read_bg_regfile(bgfile)
allvals = []
sample_means = []
for box in box_inds:
rectangle = zip(box[0::2], box[1::2])
sample = get_bg_sample(data, hdr, rectangle)
for s in sample:
allvals.append(s)
sample_mean = np.nanmean(sample)
sample_means.append(sample_mean)
this_mean = np.around(np.nanmean(sample_means), 8)
final_data = data - this_mean
hdr['BG'] = this_mean
hdr['comment'] = 'Background has been subtracted.'
outfile = os.path.join(final_dir, 'final_mosaic.fits')
pyfits.writeto(outfile, final_data, hdr)
def read_bg_regfile(regfile):
f = open(regfile, 'r')
boxes = f.readlines()
f.close()
box_list = []
for b in boxes:
this_box = []
box = b.strip('polygon()\n').split(',')
[this_box.append(int(np.around(float(bb), 0))) for bb in box]
box_list.append(this_box)
return box_list
def get_bg_sample(data, hdr, box):
wcs = pywcs.WCS(hdr, naxis=2)
x, y = np.arange(data.shape[0]), np.arange(data.shape[1])
X, Y = np.meshgrid(x, y, indexing='ij')
xx, yy = X.flatten(), Y.flatten()
pixels = np.array(zip(yy, xx))
box_coords = box
sel = Path(box_coords).contains_points(pixels)
sample = data.flatten()[sel]
return sample
|
mit
|
TomAugspurger/pandas
|
pandas/tests/config/test_localization.py
|
4
|
2861
|
import codecs
import locale
import os
import pytest
from pandas._config.localization import can_set_locale, get_locales, set_locale
from pandas.compat import is_platform_windows
import pandas as pd
_all_locales = get_locales() or []
_current_locale = locale.getlocale()
# Don't run any of these tests if we are on Windows or have no locales.
pytestmark = pytest.mark.skipif(
is_platform_windows() or not _all_locales, reason="Need non-Windows and locales"
)
_skip_if_only_one_locale = pytest.mark.skipif(
len(_all_locales) <= 1, reason="Need multiple locales for meaningful test"
)
def test_can_set_locale_valid_set():
# Can set the default locale.
assert can_set_locale("")
def test_can_set_locale_invalid_set():
# Cannot set an invalid locale.
assert not can_set_locale("non-existent_locale")
def test_can_set_locale_invalid_get(monkeypatch):
# see GH#22129
# In some cases, an invalid locale can be set,
# but a subsequent getlocale() raises a ValueError.
def mock_get_locale():
raise ValueError()
with monkeypatch.context() as m:
m.setattr(locale, "getlocale", mock_get_locale)
assert not can_set_locale("")
def test_get_locales_at_least_one():
# see GH#9744
assert len(_all_locales) > 0
@_skip_if_only_one_locale
def test_get_locales_prefix():
first_locale = _all_locales[0]
assert len(get_locales(prefix=first_locale[:2])) > 0
@_skip_if_only_one_locale
@pytest.mark.parametrize(
"lang,enc",
[
("it_CH", "UTF-8"),
("en_US", "ascii"),
("zh_CN", "GB2312"),
("it_IT", "ISO-8859-1"),
],
)
def test_set_locale(lang, enc):
if all(x is None for x in _current_locale):
# Not sure why, but on some Travis runs with pytest,
# getlocale() returned (None, None).
pytest.skip("Current locale is not set.")
enc = codecs.lookup(enc).name
new_locale = lang, enc
if not can_set_locale(new_locale):
msg = "unsupported locale setting"
with pytest.raises(locale.Error, match=msg):
with set_locale(new_locale):
pass
else:
with set_locale(new_locale) as normalized_locale:
new_lang, new_enc = normalized_locale.split(".")
new_enc = codecs.lookup(enc).name
normalized_locale = new_lang, new_enc
assert normalized_locale == new_locale
# Once we exit the "with" statement, locale should be back to what it was.
current_locale = locale.getlocale()
assert current_locale == _current_locale
def test_encoding_detected():
system_locale = os.environ.get("LC_ALL")
system_encoding = system_locale.split(".")[-1] if system_locale else "utf-8"
assert (
codecs.lookup(pd.options.display.encoding).name
== codecs.lookup(system_encoding).name
)
|
bsd-3-clause
|
ygenc/onlineLDA
|
onlineldavb_new/build/scipy/scipy/stats/distributions.py
|
2
|
213950
|
# Functions to implement several important functions for
# various Continous and Discrete Probability Distributions
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
import math
import warnings
from copy import copy
from scipy.misc import comb, derivative
from scipy import special
from scipy import optimize
from scipy import integrate
from scipy.special import gammaln as gamln
import inspect
from numpy import alltrue, where, arange, putmask, \
ravel, take, ones, sum, shape, product, repeat, reshape, \
zeros, floor, logical_and, log, sqrt, exp, arctanh, tan, sin, arcsin, \
arctan, tanh, ndarray, cos, cosh, sinh, newaxis, array, log1p, expm1
from numpy import atleast_1d, polyval, ceil, place, extract, \
any, argsort, argmax, vectorize, r_, asarray, nan, inf, pi, isinf, \
power, NINF, empty
import numpy
import numpy as np
import numpy.random as mtrand
from numpy import flatnonzero as nonzero
import vonmises_cython
__all__ = [
'rv_continuous',
'ksone', 'kstwobign', 'norm', 'alpha', 'anglit', 'arcsine',
'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy',
'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang',
'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy',
'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l',
'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme',
'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r',
'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant',
'gausshyper', 'invgamma', 'invgauss', 'invweibull',
'johnsonsb', 'johnsonsu', 'laplace', 'levy', 'levy_l',
'levy_stable', 'logistic', 'loggamma', 'loglaplace', 'lognorm',
'gilbrat', 'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't',
'nct', 'pareto', 'lomax', 'powerlaw', 'powerlognorm', 'powernorm',
'rdist', 'rayleigh', 'reciprocal', 'rice', 'recipinvgauss',
'semicircular', 'triang', 'truncexpon', 'truncnorm',
'tukeylambda', 'uniform', 'vonmises', 'wald', 'wrapcauchy',
'entropy', 'rv_discrete',
'binom', 'bernoulli', 'nbinom', 'geom', 'hypergeom', 'logser',
'poisson', 'planck', 'boltzmann', 'randint', 'zipf', 'dlaplace',
'skellam'
]
floatinfo = numpy.finfo(float)
errp = special.errprint
arr = asarray
gam = special.gamma
import types
from scipy.misc import doccer
all = alltrue
sgf = vectorize
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings.
docheaders = {'methods':"""\nMethods\n-------\n""",
'parameters':"""\nParameters\n---------\n""",
'notes':"""\nNotes\n-----\n""",
'examples':"""\nExamples\n--------\n"""}
_doc_rvs = \
"""rvs(%(shapes)s, loc=0, scale=1, size=1)
Random variates.
"""
_doc_pdf = \
"""pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = \
"""logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = \
"""pmf(x, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = \
"""logpmf(x, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = \
"""cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative density function.
"""
_doc_logcdf = \
"""logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative density function.
"""
_doc_sf = \
"""sf(x, %(shapes)s, loc=0, scale=1)
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = \
"""logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = \
"""ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = \
"""isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of sf).
"""
_doc_moment = \
"""moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = \
"""stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = \
"""entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = \
"""fit(data, %(shapes)s, loc=0, scale=1)
Parameter estimates for generic data.
"""
_doc_expect = \
"""expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = \
"""expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = \
"""median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = \
"""mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = \
"""var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = \
"""std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = \
"""interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = \
"""
Parameters
----------
x : array_like
quantiles
q : array_like
lower or upper tail probability
%(shapes)s : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
"""
_doc_default_longsummary = \
"""Continuous random variables are defined from a standard form and may
require some shape parameters to complete their specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = \
"""Examples
--------
>>> from scipy.stats import %(name)s
>>> numargs = %(name)s.numargs
>>> [ %(shapes)s ] = [0.9,] * numargs
>>> rv = %(name)s(%(shapes)s)
Display frozen pdf
>>> x = np.linspace(0, np.minimum(rv.dist.b, 3))
>>> h = plt.plot(x, rv.pdf(x))
Check accuracy of cdf and ppf
>>> prb = %(name)s.cdf(x, %(shapes)s)
>>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20)
Random number generation
>>> R = %(name)s.rvs(%(shapes)s, size=100)
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note])
docdict = {'rvs':_doc_rvs,
'pdf':_doc_pdf,
'logpdf':_doc_logpdf,
'cdf':_doc_cdf,
'logcdf':_doc_logcdf,
'sf':_doc_sf,
'logsf':_doc_logsf,
'ppf':_doc_ppf,
'isf':_doc_isf,
'stats':_doc_stats,
'entropy':_doc_entropy,
'fit':_doc_fit,
'moment':_doc_moment,
'expect':_doc_expect,
'interval':_doc_interval,
'mean':_doc_mean,
'std':_doc_std,
'var':_doc_var,
'median':_doc_median,
'allmethods':_doc_allmethods,
'callparams':_doc_default_callparams,
'longsummary':_doc_default_longsummary,
'frozennote':_doc_default_frozen_note,
'example':_doc_default_example,
'default':_doc_default,
'before_notes':_doc_default_before_notes}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'fit', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in
_doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(\
'Continuous', 'Discrete')
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
docdict_discrete['example'] = _doc_default_example.replace('[0.9,]',
'Replace with reasonable value')
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['callparams'],
docdict_discrete['frozennote']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n==0):
return 1.0
elif (n==1):
if mu is None:
val = moment_func(1,*args)
else:
val = mu
elif (n==2):
if mu2 is None or mu is None:
val = moment_func(2,*args)
else:
val = mu2 + mu*mu
elif (n==3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3,*args)
else:
mu3 = g1*(mu2**1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu**3 # 3rd non-central moment
elif (n==4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4,*args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*(mu2**1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu**4
else:
val = moment_func(n, *args)
return val
def _skew(data):
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / m2**1.5
def _kurtosis(data):
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
def _build_random_array(fun, args, size=None):
# Build an array by applying function fun to
# the arguments in args, creating an array with
# the specified shape.
# Allows an integer shape n as a shorthand for (n,).
if isinstance(size, types.IntType):
size = [size]
if size is not None and len(size) != 0:
n = numpy.multiply.reduce(size)
s = apply(fun, args + (n,))
s.shape = size
return s
else:
n = 1
s = apply(fun, args + (n,))
return s[0]
random = mtrand.random_sample
rand = mtrand.rand
random_integers = mtrand.random_integers
permutation = mtrand.permutation
## Internal class to compute a ppf given a distribution.
## (needs cdf function) and uses brentq from scipy.optimize
## to compute ppf from cdf.
class general_cont_ppf(object):
def __init__(self, dist, xa=-10.0, xb=10.0, xtol=1e-14):
self.dist = dist
self.cdf = eval('%scdf'%dist)
self.xa = xa
self.xb = xb
self.xtol = xtol
self.vecfunc = sgf(self._single_call,otypes='d')
def _tosolve(self, x, q, *args):
return apply(self.cdf, (x, )+args) - q
def _single_call(self, q, *args):
return optimize.brentq(self._tosolve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol)
def __call__(self, q, *args):
return self.vecfunc(q, *args)
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
self.dist = dist
def pdf(self, x): #raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None):
kwds = self.kwds.copy()
kwds.update({'size':size})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments':moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self,k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self,k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
## NANs are returned for unsupported parameters.
## location and scale parameters are optional for each distribution.
## The shape parameters are generally required
##
## The loc and scale parameters must be given as keyword parameters.
## These are related to the common symbols in the .lyx file
## skew is third central moment / variance**(1.5)
## kurtosis is fourth central moment / variance**2 - 3
## References::
## Documentation for ranlib, rv2, cdflib and
##
## Eric Weisstein's World of Mathematics http://mathworld.wolfram.com/
## http://mathworld.wolfram.com/topics/StatisticalDistributions.html
##
## Documentation to Regress+ by Michael McLaughlin
##
## Engineering and Statistics Handbook (NIST)
## http://www.itl.nist.gov/div898/handbook/index.htm
##
## Documentation for DATAPLOT from NIST
## http://www.itl.nist.gov/div898/software/dataplot/distribu.htm
##
## Norman Johnson, Samuel Kotz, and N. Balakrishnan "Continuous
## Univariate Distributions", second edition,
## Volumes I and II, Wiley & Sons, 1994.
## Each continuous random variable has the following methods
##
## rvs -- Random Variates (alternatively calling the class could produce these)
## pdf -- PDF
## logpdf -- log PDF (more numerically accurate if possible)
## cdf -- CDF
## logcdf -- log of CDF
## sf -- Survival Function (1-CDF)
## logsf --- log of SF
## ppf -- Percent Point Function (Inverse of CDF)
## isf -- Inverse Survival Function (Inverse of SF)
## stats -- Return mean, variance, (Fisher's) skew, or (Fisher's) kurtosis
## nnlf -- negative log likelihood function (to minimize)
## fit -- Model-fitting
##
## Maybe Later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
## To define a new random variable you subclass the rv_continuous class
## and re-define the
##
## _pdf method which will be given clean arguments (in between a and b)
## and passing the argument check method
##
## If positive argument checking is not correct for your RV
## then you will also need to re-define
## _argcheck
## Correct, but potentially slow defaults exist for the remaining
## methods but for speed and/or accuracy you can over-ride
##
## _cdf, _ppf, _rvs, _isf, _sf
##
## Rarely would you override _isf and _sf but you could for numerical precision.
##
## Statistics are computed using numerical integration by default.
## For speed you can redefine this using
##
## _stats --- take shape parameters and return mu, mu2, g1, g2
## --- If you can't compute one of these return it as None
##
## --- Can also be defined with a keyword argument moments=<str>
## where <str> is a string composed of 'm', 'v', 's',
## and/or 'k'. Only the components appearing in string
## should be computed and returned in the order 'm', 'v',
## 's', or 'k' with missing values returned as None
##
## OR
##
## You can override
##
## _munp -- takes n and shape parameters and returns
## -- the nth non-central moment of the distribution.
##
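## A minimal sketch of that pattern (illustrative only; not a distribution
## this module ships), for a density f(x) = 2x on [0, 1]:
##
##     class _example_gen(rv_continuous):
##         def _pdf(self, x):
##             return 2.0 * x
##         def _cdf(self, x):
##             return x**2
##         def _ppf(self, q):
##             return sqrt(q)
##     example = _example_gen(a=0.0, b=1.0, name='example')
##
## Overriding _cdf and _ppf lets example.rvs(size=5) and example.stats()
## avoid the slower generic numerical integration and root-finding defaults.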
def valarray(shape,value=nan,typecode=None):
"""Return an array of all value.
"""
out = reshape(repeat([value],product(shape,axis=0),axis=0),shape)
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = arr(out)
return out
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4,5))
>>> B = 2
>>> C = rand((1,5))
>>> cond = np.ones(A.shape)
>>> [A1,B1,C1] = argsreduce(cond,A,B,C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2,B2,C2] = argsreduce(cond,A,B,C)
>>> B2.shape
(15,)
"""
newargs = atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs,]
expand_arr = (cond==cond)
return [extract(cond, arr1 * expand_arr) for arr1 in newargs]
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def _fix_loc_scale(self, args, loc, scale=1):
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None:
# loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and scale is None:
# loc and scale given without keyword
loc, scale = args[-2:]
args = args[:self.numargs]
if scale is None:
scale = 1.0
if loc is None:
loc = 0.0
return args, loc, scale
def _fix_loc(self, args, loc):
args, loc, scale = self._fix_loc_scale(args, loc)
return args, loc
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self,*args,**kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
defining number of random variates (default=1)
Returns
-------
rvs : array_like
random variates of given `size`
"""
kwd_names = ['loc', 'scale', 'size', 'discrete']
loc, scale, size, discrete = map(kwds.get, kwd_names,
[None]*len(kwd_names))
args, loc, scale = self._fix_loc_scale(args, loc, scale)
cond = logical_and(self._argcheck(*args),(scale >= 0))
if not all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = numpy.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# Cast to int if discrete
if discrete:
if numpy.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
median : float
the median of the distribution.
See Also
--------
self.ppf --- inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""Confidence interval with equal areas around the median
Parameters
----------
alpha : array_like float in [0,1]
Probability that an rv will be drawn from the returned range
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the instance
object for more information)
loc : array_like, optional
location parameter (default = 0)
scale : array_like, optional
scale parameter (default = 1)
Returns
-------
a, b: array_like (float)
end-points of range that contain alpha % of the rvs
"""
alpha = arr(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances from for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default) for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xa : float, optional
Lower bound for fixed point calculation for generic ppf.
xb : float, optional
Upper bound for fixed point calculation for generic ppf.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : object, optional
The value in a result array that indicates a value for which
some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
rvs(<shape(s)>, loc=0, scale=1, size=1)
random variates
pdf(x, <shape(s)>, loc=0, scale=1)
probability density function
logpdf(x, <shape(s)>, loc=0, scale=1)
log of the probability density function
cdf(x, <shape(s)>, loc=0, scale=1)
cumulative distribution function
logcdf(x, <shape(s)>, loc=0, scale=1)
log of the cumulative distribution function
sf(x, <shape(s)>, loc=0, scale=1)
survival function (1-cdf --- sometimes more accurate)
logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
ppf(q, <shape(s)>, loc=0, scale=1)
percent point function (inverse of cdf --- quantiles)
isf(q, <shape(s)>, loc=0, scale=1)
inverse survival function (inverse of sf)
moment(n, <shape(s)>, loc=0, scale=1)
non-central n-th moment of the distribution. May not work for array arguments.
stats(<shape(s)>, loc=0, scale=1, moments='mv')
mean('m'), variance('v'), skew('s'), and/or kurtosis('k')
entropy(<shape(s)>, loc=0, scale=1)
(differential) entropy of the RV.
fit(data, <shape(s)>, loc=0, scale=1)
Parameter estimates for generic data
expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
median(<shape(s)>, loc=0, scale=1)
Median of the distribution.
mean(<shape(s)>, loc=0, scale=1)
Mean of the distribution.
std(<shape(s)>, loc=0, scale=1)
Standard deviation of the distribution.
var(<shape(s)>, loc=0, scale=1)
Variance of the distribution.
interval(alpha, <shape(s)>, loc=0, scale=1)
Interval that contains a random realization of this distribution
with probability `alpha`.
__call__(<shape(s)>, loc=0, scale=1)
Calling a distribution instance creates a frozen RV object with the
same methods but holding the given shape, location, and scale fixed.
See Notes section.
**Parameters for Methods**
x : array_like
quantiles
q : array_like
lower or upper tail probability
<shape(s)> : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments)
moments : string, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
n : int
order of the moment to calculate in the `moment` method
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might not work in
all cases when called directly.
Notes
-----
**Frozen Distribution**
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Subclassing**
New random variables can be defined by subclassing rv_continuous class
and re-defining at least the
_pdf or the _cdf method (normalized to location 0 and scale 1),
which will be given clean arguments (in between a and b)
that pass the argument check method.
If positive argument checking is not correct for your RV
then you will also need to re-define ::
_argcheck
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride ::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override _isf, _sf, and _logsf but you could.
Statistics are computed using numerical integration by default.
For speed you can redefine this using
_stats
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument moments=<str>
where <str> is a string composed of 'm', 'v', 's',
and/or 'k'. Only the components appearing in string
should be computed and returned in the order 'm', 'v',
's', or 'k' with missing values returned as None
OR
You can override
_munp
takes n and shape parameters and returns
the nth non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following::
class gaussian_gen(rv_continuous):
"Gaussian distribution"
def _pdf(self, x):
...
...
"""
def __init__(self, momtype=1, a=None, b=None, xa=-10.0, xb=10.0,
xtol=1e-14, badvalue=None, name=None, longname=None,
shapes=None, extradoc=None):
rv_generic.__init__(self)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xa = xa
self.xb = xb
self.xtol = xtol
self._size = 1
self.m = 0.0
self.moment_type = momtype
self.expandarr = 1
if not hasattr(self,'numargs'):
#allows more general subclassing with *args
cdf_signature = inspect.getargspec(self._cdf.im_func)
numargs1 = len(cdf_signature[0]) - 2
pdf_signature = inspect.getargspec(self._pdf.im_func)
numargs2 = len(pdf_signature[0]) - 2
self.numargs = max(numargs1, numargs2)
#nin correction
self.vecfunc = sgf(self._ppf_single_call,otypes='d')
self.vecfunc.nin = self.numargs + 1
self.vecentropy = sgf(self._entropy,otypes='d')
self.vecentropy.nin = self.numargs + 1
self.veccdf = sgf(self._cdf_single_call,otypes='d')
self.veccdf.nin = self.numargs + 1
self.shapes = shapes
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = sgf(self._mom0_sc,otypes='d')
else:
self.generic_moment = sgf(self._mom1_sc,otypes='d')
self.generic_moment.nin = self.numargs+1 # Because of the *args argument
# of _mom0_sc, vectorize cannot count the number of arguments correctly.
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
# generate docstring for subclass instances
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s continuous random variable.'%longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _ppf_to_solve(self, x, q,*args):
return self.cdf(x, *args) - q
def _ppf_single_call(self, q, *args):
return optimize.brentq(self._ppf_to_solve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x,m,*args):
return x**m * self.pdf(x,*args)
def _mom0_sc(self, m,*args):
return integrate.quad(self._mom_integ0, self.a,
self.b, args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q,m,*args):
return (self.ppf(q,*args))**m
def _mom1_sc(self, m,*args):
return integrate.quad(self._mom_integ1, 0, 1,args=(m,)+args)[0]
## These are the methods you must define (standard form functions)
def _argcheck(self, *args):
# Default check for correct values on args and keywords.
# Returns condition array of 1's where arguments are correct and
# 0's where they are not.
cond = 1
for arg in args:
cond = logical_and(cond,(arr(arg) > 0))
return cond
def _pdf(self,x,*args):
return derivative(self._cdf,x,dx=1e-5,args=args,order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
##(return 1-d using self._size to get number)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = mtrand.sample(self._size)
Y = self._ppf(U,*args)
return Y
def _cdf_single_call(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self.veccdf(x,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self.vecfunc(q,*args)
def _isf(self, q, *args):
return self._ppf(1.0-q,*args) #use correct _ppf for subclasses
# The actual calculation functions (no basic checking needs to be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self,*args, **kwds):
return None, None, None, None
# Non-central moments (moments about zero)
def _munp(self,n,*args):
return self.generic_moment(n,*args)
def pdf(self,x,*args,**kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = arr((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
putmask(output,(1-cond0)+np.isnan(x),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = arr((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
putmask(output,(1-cond0)+np.isnan(x),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self,x,*args,**kwds):
"""
Cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : array_like
Cumulative distribution function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0)+np.isnan(x),self.badvalue)
place(output,cond2,1.0)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self,x,*args,**kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0)*(cond1==cond1)+np.isnan(x),self.badvalue)
place(output,cond2,0.0)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,x,*args,**kwds):
"""
Survival function (1-cdf) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0)+np.isnan(x),self.badvalue)
place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,x,*args,**kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0)+np.isnan(x),self.badvalue)
place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
q,loc,scale = map(arr,(q,loc,scale))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.a*scale + loc)
place(output,(1-cond0)+(1-cond1)*(q!=0.0), self.badvalue)
place(output,cond2,self.b*scale + loc)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output,cond,self._ppf(*goodargs)*scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the upper tail probability q.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
q,loc,scale = map(arr,(q,loc,scale))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.b)
#place(output,(1-cond0)*(cond1==cond1), self.badvalue)
place(output,(1-cond0)*(cond1==cond1)+(1-cond1)*(q!=0.0), self.badvalue)
place(output,cond2,self.a)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc))) #PB replace 1-q by q
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output,cond,self._isf(*goodargs)*scale + loc) #PB use _isf instead of _ppf
if output.ndim == 0:
return output[()]
return output
def stats(self,*args,**kwds):
"""
Some statistics of the given RV
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
moments : string, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
loc,scale,moments=map(kwds.get,['loc','scale','moments'])
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None:
# loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and scale is None:
# loc and scale given without keyword
loc, scale = args[-2:]
if N == self.numargs + 3 and moments is None:
# loc, scale, and moments
loc, scale, moments = args[-3:]
args = args[:self.numargs]
if scale is None: scale = 1.0
if loc is None: loc = 0.0
if moments is None: moments = 'mv'
loc,scale = map(arr,(loc,scale))
args = tuple(map(arr,args))
cond = self._argcheck(*args) & (scale > 0) & (loc==loc)
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*np.power(mu2,1.5) #(mu2**1.5) breaks down for nan and inf
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
place(out0,cond,mu*scale+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
if np.isinf(mu):
#if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0,cond,mu2*scale*scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / mu2**1.5
out0 = default.copy()
place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0,cond,g2)
output.append(out0)
else: #no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of the distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
kwds : keyword arguments, optional
These can include "loc" and "scale", as well as other keyword
arguments relevant for a given distribution.
"""
loc = kwds.get('loc', 0)
scale = kwds.get('scale', 1)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0): raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mdict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args,**mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args),axis=0)
def nnlf(self, theta, x):
# - sum (log pdf(x, theta),axis=0)
# where theta are the parameters (including loc and scale)
#
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = arr((x-loc) / scale)
cond0 = (x <= self.a) | (x >= self.b)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N*log(scale)
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
return args + self.fit_loc_scale(data, *args)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
args = list(args)
Nargs = len(args)
fixedn = []
index = range(Nargs)
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = args[:]
for n, key in zip(index, names):
if kwds.has_key(key):
fixedn.append(n)
args[n] = kwds[key]
del x0[n]
if len(fixedn) == 0:
func = self.nnlf
restore = None
else:
if len(fixedn) == len(index):
raise ValueError("All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self.nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
f0...fn : hold respective shape parameters fixed.
floc : hold location parameter fixed to specified value.
fscale : hold scale parameter fixed to specified value.
optimizer : The optimizer to use. The optimizer must take func,
and starting position as the first two arguments,
plus args (for extra arguments to pass to the
function to be optimized) and disp=0 to suppress
output as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
"""
Narg = len(args)
if Narg > self.numargs:
raise ValueError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not (kwds.has_key('loc') and
kwds.has_key('scale')):
start = self._fitstart(data) # get distribution specific starting locations
args += start[Narg:-2]
loc = kwds.get('loc', start[-2])
scale = kwds.get('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.get('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, (str, unicode)):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
vals = optimizer(func,x0,args=(ravel(data),),disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments
"""
mu, mu2 = self.stats(*args,**{'moments':'mv'})
muhat = arr(data).mean()
mu2hat = arr(data).var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
return Lhat, Shat
@np.deprecate
def est_loc_scale(self, data, *args):
"""This function is deprecated, use self.fit_loc_scale(data) instead. """
return self.fit_loc_scale(data, *args)
def freeze(self,*args,**kwds):
return rv_frozen(self,*args,**kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return val*log(val)
entr = -integrate.quad(integ,self.a,self.b)[0]
if not np.isnan(entr):
return entr
else: # try with different limits if integration problems
low,upp = self.ppf([0.001,0.999],*args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return -integrate.quad(integ,lower,upper)[0]
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
output = zeros(shape(cond0),'d')
place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
#I don't know when or why vecentropy got broken when numargs == 0
if self.numargs == 0:
place(output,cond0,self._entropy()+log(scale))
else:
place(output,cond0,self.vecentropy(*goodargs)+log(scale))
return output
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
func : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution
conditional : boolean (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expected value : float
Notes
-----
This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from integrate.quad.
"""
lockwds = {'loc': loc,
'scale':scale}
if func is None:
def fun(x, *args):
return x*self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x)*self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
return integrate.quad(fun, lb, ub, **kwds)[0] / invfac
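# Editor's illustrative sketch (not part of the original source): a minimal
# rv_continuous subclass that defines only _pdf, as described in the class
# docstring above; cdf, ppf, rvs, stats, fit, ... all fall back to the
# generic numerical implementations.  The names below are invented for the
# example and are not used elsewhere in this module.
class _demo_exp2_gen(rv_continuous):
    """A toy distribution with density 2*exp(-2*x) on x >= 0."""
    def _pdf(self, x):
        return 2.0 * exp(-2.0 * x)
_demo_exp2 = _demo_exp2_gen(a=0.0, name='_demo_exp2')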
_EULER = 0.577215664901532860606512090082402431042 # -special.psi(1)
_ZETA3 = 1.202056903159594285399738161511449990765 # special.zeta(3,1) Apery's constant
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
"""General Kolmogorov-Smirnov one-sided test.
%(default)s
"""
def _cdf(self,x,n):
return 1.0-special.smirnov(n,x)
def _ppf(self,q,n):
return special.smirnovi(n,1.0-q)
ksone = ksone_gen(a=0.0, name='ksone', shapes="n")
class kstwobign_gen(rv_continuous):
"""Kolmogorov-Smirnov two-sided test for large N.
%(default)s
"""
def _cdf(self,x):
return 1.0-special.kolmogorov(x)
def _sf(self,x):
return special.kolmogorov(x)
def _ppf(self,q):
return special.kolmogi(1.0-q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
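# Editor's illustrative sketch (not part of the original source): kstwobign is
# the asymptotic null distribution of sqrt(n)*D_n for the two-sided
# Kolmogorov-Smirnov statistic, so an approximate large-sample p-value for an
# observed statistic d with sample size n is its survival function:
def _example_ks_pvalue(d, n):
    return kstwobign.sf(d * sqrt(n))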
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = math.sqrt(2*pi)
_norm_pdf_logC = math.log(_norm_pdf_C)
def _norm_pdf(x):
return exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return special.ndtr(x)
def _norm_logcdf(x):
return log(special.ndtr(x))
def _norm_ppf(q):
return special.ndtri(q)
class norm_gen(rv_continuous):
"""A normal continuous random variable.
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is::
norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
%(example)s
"""
def _rvs(self):
return mtrand.standard_normal(self._size)
def _pdf(self,x):
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self,x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_cdf(-x)
def _logsf(self, x):
return _norm_logcdf(-x)
def _ppf(self,q):
return _norm_ppf(q)
def _isf(self,q):
return -_norm_ppf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(log(2*pi)+1)
norm = norm_gen(name='norm')
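# Editor's illustrative sketch (not part of the original source): typical use
# of the `norm` instance defined above, wrapped in a private helper so nothing
# runs at import time.  The numbers in the comments are approximate.
def _example_norm_usage():
    frozen = norm(loc=10.0, scale=2.0)    # frozen RV, see rv_continuous.__call__
    p = frozen.cdf(12.0)                  # ~0.841, same as norm.cdf(12.0, loc=10.0, scale=2.0)
    x = norm.ppf(p, loc=10.0, scale=2.0)  # recovers ~12.0
    return p, x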
## Alpha distribution
##
class alpha_gen(rv_continuous):
"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` is::
alpha.pdf(x,a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2),
where ``Phi(a)`` is the normal CDF, ``x > 0``, and ``a > 0``.
%(example)s
"""
def _pdf(self, x, a):
return 1.0/(x**2)/special.ndtr(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*log(x) + _norm_logpdf(a-1.0/x) - log(special.ndtr(a))
def _cdf(self, x, a):
return special.ndtr(a-1.0/x) / special.ndtr(a)
def _ppf(self, q, a):
return 1.0/arr(a-special.ndtri(q*special.ndtr(a)))
def _stats(self, a):
return [inf]*2 + [nan]*2
alpha = alpha_gen(a=0.0, name='alpha', shapes='a')
## Anglit distribution
##
class anglit_gen(rv_continuous):
"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is::
anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x),
for ``-pi/4 <= x <= pi/4``.
%(example)s
"""
def _pdf(self, x):
return cos(2*x)
def _cdf(self, x):
return sin(x+pi/4)**2.0
def _ppf(self, q):
return (arcsin(sqrt(q))-pi/4)
def _stats(self):
return 0.0, pi*pi/16-0.5, 0.0, -2*(pi**4 - 96)/(pi*pi-8)**2
def _entropy(self):
return 1-log(2)
anglit = anglit_gen(a=-pi/4, b=pi/4, name='anglit')
## Arcsine distribution
##
class arcsine_gen(rv_continuous):
"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is::
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for 0 < x < 1.
%(example)s
"""
def _pdf(self, x):
return 1.0/pi/sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/pi*arcsin(sqrt(x))
def _ppf(self, q):
return sin(pi/2.0*q)**2.0
def _stats(self):
#mup = 0.5, 3.0/8.0, 15.0/48.0, 35.0/128.0
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
## Beta distribution
##
class beta_gen(rv_continuous):
"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is::
beta.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b)) * x**(a-1) *
(1-x)**(b-1),
for ``0 < x < 1``, ``a > 0``, ``b > 0``.
%(example)s
"""
def _rvs(self, a, b):
return mtrand.beta(a,b,self._size)
def _pdf(self, x, a, b):
Px = (1.0-x)**(b-1.0) * x**(a-1.0)
Px /= special.beta(a,b)
return Px
def _logpdf(self, x, a, b):
lPx = (b-1.0)*log(1.0-x) + (a-1.0)*log(x)
lPx -= log(special.beta(a,b))
return lPx
def _cdf(self, x, a, b):
return special.btdtr(a,b,x)
def _ppf(self, q, a, b):
return special.btdtri(a,b,q)
def _stats(self, a, b):
mn = a *1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*sqrt(a + b + 1) / (a + b + 2) / sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a,b))
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# special case
data = (ravel(data)-floc)/fscale
xbar = data.mean()
v = data.var(ddof=0)
fac = xbar*(1-xbar)/v - 1
a = xbar * fac
b = (1-xbar) * fac
return a, b, floc, fscale
else: # do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
beta = beta_gen(a=0.0, b=1.0, name='beta', shapes='a, b')
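# Editor's illustrative sketch (not part of the original source): when both
# `floc` and `fscale` are passed, beta_gen.fit above skips the numerical
# optimizer and uses the closed-form method-of-moments estimates from its
# special-case branch.
def _example_beta_fit():
    data = beta.rvs(2.0, 5.0, size=1000)
    a_hat, b_hat, loc_hat, scale_hat = beta.fit(data, floc=0.0, fscale=1.0)
    return a_hat, b_hat   # loc_hat and scale_hat come back as exactly 0.0 and 1.0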
## Beta Prime
class betaprime_gen(rv_continuous):
"""A beta prima continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is::
betaprime.pdf(x, a, b) =
gamma(a+b) / (gamma(a)*gamma(b)) * x**(a-1) * (1+x)**(-a-b)
for ``x > 0``, ``a > 0``, ``b > 0``.
%(example)s
"""
def _rvs(self, a, b):
u1 = gamma.rvs(a,size=self._size)
u2 = gamma.rvs(b,size=self._size)
return (u1 / u2)
def _pdf(self, x, a, b):
return 1.0/special.beta(a,b)*x**(a-1.0)/(1+x)**(a+b)
def _logpdf(self, x, a, b):
return (a-1.0)*log(x) - (a+b)*log(1+x) - log(special.beta(a,b))
def _cdf_skip(self, x, a, b):
# remove for now: special.hyp2f1 is incorrect for large a
x = where(x==1.0, 1.0-1e-6,x)
return pow(x,a)*special.hyp2f1(a+b,a,1+a,-x)/a/special.beta(a,b)
def _munp(self, n, a, b):
if (n == 1.0):
return where(b > 1, a/(b-1.0), inf)
elif (n == 2.0):
return where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), inf)
elif (n == 3.0):
return where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
inf)
elif (n == 4.0):
return where(b > 4,
a*(a+1.0)*(a+2.0)*(a+3.0)/((b-4.0)*(b-3.0) \
*(b-2.0)*(b-1.0)), inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, b=500.0, name='betaprime', shapes='a, b')
## Bradford
##
class bradford_gen(rv_continuous):
"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is::
bradford.pdf(x, c) = c / (k * (1+c*x)),
for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``.
%(example)s
"""
def _pdf(self, x, c):
return c / (c*x + 1.0) / log(1.0+c)
def _cdf(self, x, c):
return log(1.0+c*x) / log(c+1.0)
def _ppf(self, q, c):
return ((1.0+c)**q-1)/c
def _stats(self, c, moments='mv'):
k = log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) \
+ 6*c*k*k*(3*k-14) + 12*k**3
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = log(1+c)
return k/2.0 - log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford', shapes='c')
## Burr
# burr with d=1 is called the fisk distribution
class burr_gen(rv_continuous):
"""A Burr continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `burr` is::
burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
for ``x > 0``.
%(example)s
"""
def _pdf(self, x, c, d):
return c*d*(x**(-c-1.0))*((1+x**(-c*1.0))**(-d-1.0))
def _cdf(self, x, c, d):
return (1+x**(-c*1.0))**(-d**1.0)
def _ppf(self, q, c, d):
return (q**(-1.0/d)-1)**(-1.0/c)
def _stats(self, c, d, moments='mv'):
g2c, g2cd = gam(1-2.0/c), gam(2.0/c+d)
g1c, g1cd = gam(1-1.0/c), gam(1.0/c+d)
gd = gam(d)
k = gd*g2c*g2cd - g1c**2 * g1cd**2
mu = g1c*g1cd / gd
mu2 = k / gd**2.0
g1, g2 = None, None
g3c, g3cd = None, None
if 's' in moments:
g3c, g3cd = gam(1-3.0/c), gam(3.0/c+d)
g1 = 2*g1c**3 * g1cd**3 + gd*gd*g3c*g3cd - 3*gd*g2c*g1c*g1cd*g2cd
g1 /= sqrt(k**3)
if 'k' in moments:
if g3c is None:
g3c = gam(1-3.0/c)
if g3cd is None:
g3cd = gam(3.0/c+d)
g4c, g4cd = gam(1-4.0/c), gam(4.0/c+d)
g2 = 6*gd*g2c*g2cd * g1c**2 * g1cd**2 + gd**3 * g4c*g4cd
g2 -= 3*g1c**4 * g1cd**4 -4*gd**2*g3c*g1c*g1cd*g3cd
return mu, mu2, g1, g2
burr = burr_gen(a=0.0, name='burr', shapes="c, d")
# Fisk distribution
# burr is a generalization
class fisk_gen(burr_gen):
"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution, and
equals the Burr distribution with ``d=1``.
%(before_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _stats(self, c):
return burr_gen._stats(self, c, 1.0)
def _entropy(self, c):
return 2 - log(c)
fisk = fisk_gen(a=0.0, name='fisk', shapes='c')
## Cauchy
# median = loc
class cauchy_gen(rv_continuous):
"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is::
cauchy.pdf(x) = 1 / (pi * (1 + x**2))
%(example)s
"""
def _pdf(self, x):
return 1.0/pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi*q-pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/pi*arctan(x)
def _isf(self, q):
return tan(pi/2.0-pi*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(4*pi)
def _fitstart(self, data, args=None):
return (0, 1)
cauchy = cauchy_gen(name='cauchy')
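# Editor's illustrative sketch (not part of the original source): the Cauchy
# distribution has no finite moments, which is why _stats above returns
# inf/nan, while quantile-based summaries such as interval() remain well
# defined.
def _example_cauchy_summaries():
    lo, hi = cauchy.interval(0.5)    # interquartile range: (-1.0, 1.0)
    return cauchy.mean(), lo, hi     # (inf, -1.0, 1.0)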
## Chi
## (positive square-root of chi-square)
## chi(1, loc, scale) = halfnormal
## chi(2, 0, scale) = Rayleigh
## chi(3, 0, scale) = MaxWell
class chi_gen(rv_continuous):
"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is::
chi.pdf(x,df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2))
for ``x > 0``.
%(example)s
"""
def _rvs(self, df):
return sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df):
return x**(df-1.)*exp(-x*x*0.5)/(2.0)**(df*0.5-1)/gam(df*0.5)
def _cdf(self, x, df):
return special.gammainc(df*0.5,0.5*x*x)
def _ppf(self, q, df):
return sqrt(2*special.gammaincinv(df*0.5,q))
def _stats(self, df):
mu = sqrt(2)*special.gamma(df/2.0+0.5)/special.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/arr(mu2**1.5)
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= arr(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi', shapes='df')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is::
chi2.pdf(x,df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
%(example)s
"""
def _rvs(self, df):
return mtrand.chisquare(df,self._size)
def _pdf(self, x, df):
return exp(self._logpdf(x, df))
def _logpdf(self, x, df):
#term1 = (df/2.-1)*log(x)
#term1[(df==2)*(x==0)] = 0
#avoid 0*log(0)==nan
return (df/2.-1)*log(x+1e-300) - x/2. - gamln(df/2.) - (log(2)*df)/2.
## Px = x**(df/2.0-1)*exp(-x/2.0)
## Px /= special.gamma(df/2.0)* 2**(df/2.0)
## return log(Px)
def _cdf(self, x, df):
return special.chdtr(df, x)
def _sf(self, x, df):
return special.chdtrc(df, x)
def _isf(self, p, df):
return special.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2', shapes='df')
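# Editor's illustrative sketch (not part of the original source): the closed
# forms returned by chi2_gen._stats above, read through the generic `stats`
# interface.  For df=4 the exact values are mean 4, variance 8, skewness
# sqrt(2) and excess kurtosis 3.
def _example_chi2_stats():
    return chi2.stats(4.0, moments='mvsk')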
## Cosine (Approximation to the Normal)
class cosine_gen(rv_continuous):
"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is::
cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
for ``-pi <= x <= pi``.
%(example)s
"""
def _pdf(self, x):
return 1.0/2/pi*(1+cos(x))
def _cdf(self, x):
return 1.0/2/pi*(pi + x + sin(x))
def _stats(self):
return 0.0, pi*pi/3.0-2.0, 0.0, -6.0*(pi**4-90)/(5.0*(pi*pi-6)**2)
def _entropy(self):
return log(4*pi)-1.0
cosine = cosine_gen(a=-pi, b=pi, name='cosine')
## Double Gamma distribution
class dgamma_gen(rv_continuous):
"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is::
dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
for ``a > 0``.
%(example)s
"""
def _rvs(self, a):
u = random(size=self._size)
return (gamma.rvs(a,size=self._size)*where(u>=0.5,1,-1))
def _pdf(self, x, a):
ax = abs(x)
return 1.0/(2*special.gamma(a))*ax**(a-1.0) * exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return (a-1.0)*log(ax) - ax - log(2) - gamln(a)
def _cdf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
return where(x>0,0.5+fac,0.5-fac)
def _sf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
#return where(x>0,0.5-0.5*fac,0.5+0.5*fac)
return where(x>0,0.5-fac,0.5+fac)
def _ppf(self, q, a):
fac = special.gammainccinv(a,1-abs(2*q-1))
return where(q>0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma', shapes='a')
## Double Weibull distribution
##
class dweibull_gen(rv_continuous):
"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is::
dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
%(example)s
"""
def _rvs(self, c):
u = random(size=self._size)
return weibull_min.rvs(c, size=self._size)*(where(u>=0.5,1,-1))
def _pdf(self, x, c):
ax = abs(x)
Px = c/2.0*ax**(c-1.0)*exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return log(c) - log(2.0) + (c-1.0)*log(ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5*exp(-abs(x)**c)
return where(x > 0, 1-Cx1, Cx1)
def _ppf_skip(self, q, c):
fac = where(q<=0.5,2*q,2*q-1)
fac = pow(arr(log(1.0/fac)),1.0/c)
return where(q>0.5,fac,-fac)
def _stats(self, c):
var = gam(1+2.0/c)
return 0.0, var, 0.0, gam(1+4.0/c)/var
dweibull = dweibull_gen(name='dweibull', shapes='c')
## ERLANG
##
## Special case of the Gamma distribution with shape parameter an integer.
##
class erlang_gen(rv_continuous):
"""An Erlang continuous random variable.
%(before_notes)s
Notes
-----
The Erlang distribution is a special case of the Gamma distribution, with
the shape parameter an integer.
%(example)s
"""
def _rvs(self, n):
return gamma.rvs(n,size=self._size)
def _argcheck(self, n):
return (n > 0) & (floor(n)==n)
def _pdf(self, x, n):
Px = (x)**(n-1.0)*exp(-x)/special.gamma(n)
return Px
def _logpdf(self, x, n):
return (n-1.0)*log(x) - x - gamln(n)
def _cdf(self, x, n):
return special.gdtr(1.0,n,x)
def _sf(self, x, n):
return special.gdtrc(1.0,n,x)
def _ppf(self, q, n):
return special.gdtrix(1.0, n, q)
def _stats(self, n):
n = n*1.0
return n, n, 2/sqrt(n), 6/n
def _entropy(self, n):
return special.psi(n)*(1-n) + 1 + gamln(n)
erlang = erlang_gen(a=0.0, name='erlang', shapes='n')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
## scale == 1.0 / lambda
class expon_gen(rv_continuous):
"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is::
expon.pdf(x) = exp(-x)
for ``x >= 0``.
The scale parameter is equal to ``scale = 1.0 / lambda``.
%(example)s
"""
def _rvs(self):
return mtrand.standard_exponential(self._size)
def _pdf(self, x):
return exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -expm1(-x)
def _ppf(self, q):
return -log1p(-q)
def _sf(self,x):
return exp(-x)
def _logsf(self, x):
return -x
def _isf(self,q):
return -log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
expon = expon_gen(a=0.0, name='expon')
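# Editor's illustrative sketch (not part of the original source): `expon` has
# no shape parameter; a rate lambda is expressed through scale = 1/lambda, as
# the docstring above notes.
def _example_expon_rate(lam=3.0):
    rv = expon(scale=1.0 / lam)     # frozen exponential with rate lam
    return rv.mean(), rv.ppf(0.5)   # 1/lam and log(2)/lam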
## Exponentiated Weibull
class exponweib_gen(rv_continuous):
"""An exponentiated Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponweib` is::
exponweib.pdf(x, a, c) =
a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
for ``x > 0``, ``a > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, a, c):
exc = exp(-x**c)
return a*c*(1-exc)**arr(a-1) * exc * x**(c-1)
def _logpdf(self, x, a, c):
exc = exp(-x**c)
return log(a) + log(c) + (a-1.)*log(1-exc) - x**c + (c-1.0)*log(x)
def _cdf(self, x, a, c):
exm1c = -expm1(-x**c)
return arr((exm1c)**a)
def _ppf(self, q, a, c):
return (-log1p(-q**(1.0/a)))**arr(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib', shapes="a, c")
## Exponential Power
class exponpow_gen(rv_continuous):
"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is::
exponpow.pdf(x, b) = b * x**(b-1) * exp(1+x**b - exp(x**b))
for ``x >= 0``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
xbm1 = arr(x**(b-1.0))
xb = xbm1 * x
return exp(1)*b*xbm1 * exp(xb - exp(xb))
def _logpdf(self, x, b):
xb = x**(b-1.0)*x
return 1 + log(b) + (b-1.0)*log(x) + xb - exp(xb)
def _cdf(self, x, b):
xb = arr(x**b)
return -expm1(-expm1(xb))
def _sf(self, x, b):
xb = arr(x**b)
return exp(-expm1(xb))
def _isf(self, x, b):
return (log1p(-log(x)))**(1./b)
def _ppf(self, q, b):
return pow(log1p(-log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow', shapes='b')
## Fatigue-Life (Birnbaum-Saunders)
class fatiguelife_gen(rv_continuous):
"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is::
fatiguelife.pdf(x,c) =
(x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
for ``x > 0``.
%(example)s
"""
def _rvs(self, c):
z = norm.rvs(size=self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*sqrt(1 + x2)
return t
def _pdf(self, x, c):
return (x+1)/arr(2*c*sqrt(2*pi*x**3))*exp(-(x-1)**2/arr((2.0*x*c**2)))
def _logpdf(self, x, c):
return log(x+1) - (x-1)**2 / (2.0*x*c**2) - log(2*c) - 0.5*(log(2*pi) + 3*log(x))
def _cdf(self, x, c):
return special.ndtr(1.0/c*(sqrt(x)-1.0/arr(sqrt(x))))
def _ppf(self, q, c):
tmp = c*special.ndtri(q)
return 0.25*(tmp + sqrt(tmp**2 + 4))**2
def _stats(self, c):
c2 = c*c
mu = c2 / 2.0 + 1
den = 5*c2 + 4
mu2 = c2*den /4.0
g1 = 4*c*sqrt(11*c2+6.0)/den**1.5
g2 = 6*c2*(93*c2+41.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife', shapes='c')
## Folded Cauchy
class foldcauchy_gen(rv_continuous):
"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is::
foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
for ``x >= 0``.
%(example)s
"""
def _rvs(self, c):
return abs(cauchy.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return 1.0/pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/pi*(arctan(x-c) + arctan(x+c))
def _stats(self, c):
return inf, inf, nan, nan
# setting xb=1000 allows calculating the ppf up to about q=0.9993
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy', xb=1000, shapes='c')
## F
class f_gen(rv_continuous):
"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is::
df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
F.pdf(x, df1, df2) = --------------------------------------------
(df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
for ``x > 0``.
%(example)s
"""
def _rvs(self, dfn, dfd):
return mtrand.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
# n = arr(1.0*dfn)
# m = arr(1.0*dfd)
# Px = m**(m/2) * n**(n/2) * x**(n/2-1)
# Px /= (m+n*x)**((n+m)/2)*special.beta(n/2,m/2)
return exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0*dfn
m = 1.0*dfd
lPx = m/2*log(m) + n/2*log(n) + (n/2-1)*log(x)
lPx -= ((n+m)/2)*log(m+n*x) + special.betaln(n/2,m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return special.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return special.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return special.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v2 = arr(dfd*1.0)
v1 = arr(dfn*1.0)
mu = where (v2 > 2, v2 / arr(v2 - 2), inf)
mu2 = 2*v2*v2*(v2+v1-2)/(v1*(v2-2)**2 * (v2-4))
mu2 = where(v2 > 4, mu2, inf)
g1 = 2*(v2+2*v1-2)/(v2-6)*sqrt((2*v2-4)/(v1*(v2+v1-2)))
g1 = where(v2 > 6, g1, nan)
g2 = 3/(2*v2-16)*(8+g1*g1*(v2-6))
g2 = where(v2 > 8, g2, nan)
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f', shapes="dfn, dfd")
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: the Regress+ docs have the scale parameter correct, but the first
## parameter given there is a shape parameter A = c * scale.
## Half-normal is the folded normal with shape parameter c=0.
class foldnorm_gen(rv_continuous):
"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is::
foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
for ``c >= 0``.
%(example)s
"""
def _rvs(self, c):
return abs(norm.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return sqrt(2.0/pi)*cosh(c*x)*exp(-(x*x+c*c)/2.0)
def _cdf(self, x, c,):
return special.ndtr(x-c) + special.ndtr(x+c) - 1.0
def _stats(self, c):
fac = special.erf(c/sqrt(2))
mu = sqrt(2.0/pi)*exp(-0.5*c*c)+c*fac
mu2 = c*c + 1 - mu*mu
c2 = c*c
g1 = sqrt(2/pi)*exp(-1.5*c2)*(4-pi*exp(c2)*(2*c2+1.0))
g1 += 2*c*fac*(6*exp(-c2) + 3*sqrt(2*pi)*c*exp(-c2/2.0)*fac + \
pi*c*(fac*fac-1))
g1 /= pi*mu2**1.5
g2 = c2*c2+6*c2+3+6*(c2+1)*mu*mu - 3*mu**4
g2 -= 4*exp(-c2/2.0)*mu*(sqrt(2.0/pi)*(c2+2)+c*(c2+3)*exp(c2/2.0)*fac)
g2 /= mu2**2.0
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm', shapes='c')
## Extreme Value Type II or Frechet
## (defined in Regress+ documentation as Extreme LB) as
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
"""A Frechet right (or Weibull minimum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_min : The same distribution as `frechet_r`.
frechet_l, weibull_max
Notes
-----
The probability density function for `frechet_r` is::
frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*pow(x,c-1)*exp(-pow(x,c))
def _logpdf(self, x, c):
return log(c) + (c-1)*log(x) - pow(x,c)
def _cdf(self, x, c):
return -expm1(-pow(x,c))
def _ppf(self, q, c):
return pow(-log1p(-q),1.0/c)
def _munp(self, n, c):
return special.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0, name='frechet_r', shapes='c')
weibull_min = frechet_r_gen(a=0.0, name='weibull_min', shapes='c')
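# Editor's illustrative sketch (not part of the original source): weibull_min
# above is simply a second instance of frechet_r_gen, so the two names denote
# the same distribution under different labels.
def _example_weibull_alias(c=1.5, x=0.7):
    return frechet_r.pdf(x, c), weibull_min.pdf(x, c)   # identical values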
class frechet_l_gen(rv_continuous):
"""A Frechet left (or Weibull maximum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_max : The same distribution as `frechet_l`.
frechet_r, weibull_min
Notes
-----
The probability density function for `frechet_l` is::
frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
for ``x < 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*pow(-x,c-1)*exp(-pow(-x,c))
def _cdf(self, x, c):
return exp(-pow(-x,c))
def _ppf(self, q, c):
return -pow(-log(q),1.0/c)
def _munp(self, n, c):
val = special.gamma(1.0+n*1.0/c)
if (int(n) % 2):
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0, name='frechet_l', shapes='c')
weibull_max = frechet_l_gen(b=0.0, name='weibull_max', shapes='c')
## Generalized Logistic
##
class genlogistic_gen(rv_continuous):
"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is::
genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
Px = c*exp(-x)/(1+exp(-x))**(c+1.0)
return Px
def _logpdf(self, x, c):
return log(c) - x - (c+1.0)*log1p(exp(-x))
def _cdf(self, x, c):
Cx = (1+exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -log(pow(q,-1.0/c)-1)
return vals
def _stats(self, c):
zeta = special.zeta
mu = _EULER + special.psi(c)
mu2 = pi*pi/6.0 + zeta(2,c)
g1 = -2*zeta(3,c) + 2*_ZETA3
g1 /= mu2**1.5
g2 = pi**4/15.0 + 6*zeta(4,c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic', shapes='c')
## Generalized Pareto
class genpareto_gen(rv_continuous):
"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is::
genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
for ``c != 0``, and for ``x >= 0`` for all c,
and ``x < 1/abs(c)`` for ``c < 0``.
%(example)s
"""
def _argcheck(self, c):
c = arr(c)
self.b = where(c < 0, 1.0/abs(c), inf)
return where(c==0, 0, 1)
def _pdf(self, x, c):
Px = pow(1+c*x,arr(-1.0-1.0/c))
return Px
def _logpdf(self, x, c):
return (-1.0-1.0/c) * np.log1p(c*x)
def _cdf(self, x, c):
return 1.0 - pow(1+c*x,arr(-1.0/c))
def _ppf(self, q, c):
vals = 1.0/c * (pow(1-q, -c)-1)
return vals
def _munp(self, n, c):
k = arange(0,n+1)
val = (-1.0/c)**n * sum(comb(n,k)*(-1)**k / (1.0-c*k),axis=0)
return where(c*n < 1, val, inf)
def _entropy(self, c):
if (c > 0):
return 1+c
else:
self.b = -1.0 / c
return rv_continuous._entropy(self, c)
genpareto = genpareto_gen(a=0.0, name='genpareto', shapes='c')
## Generalized Exponential
class genexpon_gen(rv_continuous):
"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is::
genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
exp(-a*x - b*x + b/c * (1-exp(-c*x)))
for ``x >= 0``, ``a,b,c > 0``.
References
----------
"An Extension of Marshall and Olkin's Bivariate Exponential Distribution",
H.K. Ryu, Journal of the American Statistical Association, 1993.
"The Exponential Distribution: Theory, Methods and Applications",
N. Balakrishnan, Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
return (a+b*(-expm1(-c*x)))*exp((-a-b)*x+b*(-expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -expm1((-a-b)*x + b*(-expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-expm1(-c*x))) + (-a-b)*x+b*(-expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon', shapes='a, b, c')
## Generalized Extreme Value
##  c=0 corresponds to the Gumbel distribution (gumbel_r).
# New version by Per Brodtkorb, see ticket:767:
# it also works for c==0 (special case gumbel_r) and has
# increased precision for small c.
class genextreme_gen(rv_continuous):
"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For ``c=0``, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is::
genextreme.pdf(x, c) =
exp(-exp(-x))*exp(-x), for c==0
exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x <= 1/c, c > 0
%(example)s
"""
def _argcheck(self, c):
min = np.minimum
max = np.maximum
sml = floatinfo.machar.xmin
#self.b = where(c > 0, 1.0 / c,inf)
#self.a = where(c < 0, 1.0 / c, -inf)
self.b = where(c > 0, 1.0 / max(c, sml),inf)
self.a = where(c < 0, 1.0 / min(c,-sml), -inf)
return where(abs(c)==inf, 0, 1) #True #(c!=0)
def _pdf(self, x, c):
## ex2 = 1-c*x
## pex2 = pow(ex2,1.0/c)
## p2 = exp(-pex2)*pex2/ex2
## return p2
cx = c*x
logex2 = where((c==0)*(x==x),0.0,log1p(-cx))
logpex2 = where((c==0)*(x==x),-x,logex2/c)
pex2 = exp(logpex2)
# % Handle special cases
logpdf = where((cx==1) | (cx==-inf),-inf,-pex2+logpex2-logex2)
putmask(logpdf,(c==1) & (x==1),0.0) # logpdf(c==1 & x==1) = 0; % 0^0 situation
return exp(logpdf)
def _cdf(self, x, c):
#return exp(-pow(1-c*x,1.0/c))
loglogcdf = where((c==0)*(x==x),-x,log1p(-c*x)/c)
return exp(-exp(loglogcdf))
def _ppf(self, q, c):
#return 1.0/c*(1.-(-log(q))**c)
x = -log(-log(q))
return where((c==0)*(x==x),x,-expm1(-c*x)/c)
def _stats(self,c):
g = lambda n : gam(n*c+1)
g1 = g(1)
g2 = g(2)
g3 = g(3);
g4 = g(4)
g2mg12 = where(abs(c)<1e-7,(c*pi)**2.0/6.0,g2-g1**2.0)
gam2k = where(abs(c)<1e-7,pi**2.0/6.0, expm1(gamln(2.0*c+1.0)-2*gamln(c+1.0))/c**2.0);
eps = 1e-14
gamk = where(abs(c)<eps,-_EULER,expm1(gamln(c+1))/c)
m = where(c<-1.0,nan,-gamk)
v = where(c<-0.5,nan,g1**2.0*gam2k)
#% skewness
sk1 = where(c<-1./3,nan,np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)));
sk = where(abs(c)<=eps**0.29,12*sqrt(6)*_ZETA3/pi**3,sk1)
#% The kurtosis is:
ku1 = where(c<-1./4,nan,(g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
ku = where(abs(c)<=(eps)**0.23,12.0/5.0,ku1-3.0)
return m,v,sk,ku
def _munp(self, n, c):
k = arange(0,n+1)
vals = 1.0/c**n * sum(comb(n,k) * (-1)**k * special.gamma(c*k + 1),axis=0)
return where(c*n > -1, vals, inf)
genextreme = genextreme_gen(name='genextreme', shapes='c')
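# A minimal illustrative check (the evaluation point below is an arbitrary
# demonstration value): as stated in the docstring, ``c=0`` reduces
# `genextreme` to `gumbel_r`.
#
# >>> import numpy as np
# >>> from scipy.stats import genextreme, gumbel_r
# >>> x = 0.7
# >>> np.allclose(genextreme.pdf(x, 0.0), gumbel_r.pdf(x))
# True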
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
When ``a`` is an integer, this is the Erlang distribution, and for ``a=1``
it is the exponential distribution.
The probability density function for `gamma` is::
gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
for ``x >= 0``, ``a > 0``.
%(example)s
"""
def _rvs(self, a):
return mtrand.standard_gamma(a, self._size)
def _pdf(self, x, a):
return exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return (a-1)*log(x) - x - gamln(a)
def _cdf(self, x, a):
return special.gammainc(a, x)
def _ppf(self, q, a):
return special.gammaincinv(a,q)
def _stats(self, a):
return a, a, 2.0/sqrt(a), 6.0/a
def _entropy(self, a):
return special.psi(a)*(1-a) + 1 + gamln(a)
def _fitstart(self, data):
a = 4 / _skew(data)**2
return super(gamma_gen, self)._fitstart(data, args=(a,))
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc == 0:
xbar = ravel(data).mean()
logx_bar = ravel(log(data)).mean()
s = log(xbar) - logx_bar
def func(a):
return log(a) - special.digamma(a) - s
aest = (3-s + math.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
scale = xbar / a
return a, floc, scale
else:
return super(gamma_gen, self).fit(data, *args, **kwds)
gamma = gamma_gen(a=0.0, name='gamma', shapes='a')
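# A minimal illustrative check of the special cases noted in the comments
# above (the evaluation point is an arbitrary demonstration value).
#
# >>> import numpy as np
# >>> from scipy.stats import gamma, expon, chi2
# >>> x = 1.7
# >>> np.allclose(gamma.pdf(x, 1), expon.pdf(x))              # gamma(1) is exponential
# True
# >>> np.allclose(gamma.pdf(x, 2, scale=2), chi2.pdf(x, 4))   # gamma(df/2, scale=2) is chi2(df)
# True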
# Generalized Gamma
class gengamma_gen(rv_continuous):
"""A generalized gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gengamma` is::
gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
for ``x > 0``, ``a > 0``, and ``c != 0``.
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
return abs(c)* exp((c*a-1)*log(x)-x**c- gamln(a))
def _cdf(self, x, a, c):
val = special.gammainc(a,x**c)
cond = c + 0*val
return where(cond>0,val,1-val)
def _ppf(self, q, a, c):
val1 = special.gammaincinv(a,q)
val2 = special.gammaincinv(a,1.0-q)
ic = 1.0/c
cond = c+0*val1
return where(cond > 0,val1**ic,val2**ic)
def _munp(self, n, a, c):
return special.gamma(a+n*1.0/c) / special.gamma(a)
def _entropy(self, a,c):
val = special.psi(a)
return a*(1-val) + 1.0/c*val + gamln(a)-log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma', shapes="a, c")
## Generalized Half-Logistic
##
class genhalflogistic_gen(rv_continuous):
"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is::
genhalflogistic.pdf(x, c) = 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
for ``0 <= x <= 1/c``, and ``c > 0``.
%(example)s
"""
def _argcheck(self, c):
self.b = 1.0 / c
return (c > 0)
def _pdf(self, x, c):
limit = 1.0/c
tmp = arr(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = arr(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self,c):
return 2 - (2*c+1)*log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic',
shapes='c')
## Gompertz (Truncated Gumbel)
## Defined for x>=0
class gompertz_gen(rv_continuous):
"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is::
gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
for ``x >= 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
ex = exp(x)
return c*ex*exp(-c*(ex-1))
def _cdf(self, x, c):
return 1.0-exp(-c*(exp(x)-1))
def _ppf(self, q, c):
return log(1-1.0/c*log(1-q))
def _entropy(self, c):
return 1.0 - log(c) - exp(c)*special.expn(1,c)
gompertz = gompertz_gen(a=0.0, name='gompertz', shapes='c')
## Gumbel, Log-Weibull, Fisher-Tippett, Gompertz
## The left-skewed and right-skewed Gumbel distributions are available
## as gumbel_l and gumbel_r.
class gumbel_r_gen(rv_continuous):
"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is::
gumbel_r.pdf(x) = exp(-(x + exp(-x)))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(example)s
"""
def _pdf(self, x):
ex = exp(-x)
return ex*exp(-ex)
def _logpdf(self, x):
return -x - exp(-x)
def _cdf(self, x):
return exp(-exp(-x))
def _logcdf(self, x):
return -exp(-x)
def _ppf(self, q):
return -log(-log(q))
def _stats(self):
return _EULER, pi*pi/6.0, \
12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
# differential entropy of the standard Gumbel distribution: Euler's constant + 1
return _EULER + 1.0
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is::
gumbel_l.pdf(x) = exp(x - exp(x))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(example)s
"""
def _pdf(self, x):
ex = exp(x)
return ex*exp(-ex)
def _logpdf(self, x):
return x - exp(x)
def _cdf(self, x):
return 1.0-exp(-exp(x))
def _ppf(self, q):
return log(-log(1-q))
def _stats(self):
return -_EULER, pi*pi/6.0, \
-12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
# same entropy as gumbel_r (its mirror image): Euler's constant + 1
return _EULER + 1.0
gumbel_l = gumbel_l_gen(name='gumbel_l')
# Half-Cauchy
class halfcauchy_gen(rv_continuous):
"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is::
halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
for ``x >= 0``.
%(example)s
"""
def _pdf(self, x):
return 2.0/pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/pi) - np.log1p(x*x)
def _cdf(self, x):
return 2.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi/2*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(2*pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
## Half-Logistic
##
class halflogistic_gen(rv_continuous):
"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is::
halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2
for ``x >= 0``.
%(example)s
"""
def _pdf(self, x):
return 0.5/(cosh(x/2.0))**2.0
def _cdf(self, x):
return tanh(x/2.0)
def _ppf(self, q):
return 2*arctanh(q)
def _munp(self, n):
if n==1: return 2*log(2)
if n==2: return pi*pi/3.0
if n==3: return 9*_ZETA3
if n==4: return 7*pi**4 / 15.0
return 2*(1-pow(2.0,1-n))*special.gamma(n+1)*special.zeta(n,1)
def _entropy(self):
return 2-log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
## Half-normal = chi(1, loc, scale)
class halfnorm_gen(rv_continuous):
"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is::
halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
for ``x > 0``.
%(example)s
"""
def _rvs(self):
return abs(norm.rvs(size=self._size))
def _pdf(self, x):
return sqrt(2.0/pi)*exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/pi) - x*x/2.0
def _cdf(self, x):
return special.ndtr(x)*2-1.0
def _ppf(self, q):
return special.ndtri((1+q)/2.0)
def _stats(self):
return sqrt(2.0/pi), 1-2.0/pi, sqrt(2)*(4-pi)/(pi-2)**1.5, \
8*(pi-3)/(pi-2)**2
def _entropy(self):
return 0.5*log(pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
## Hyperbolic Secant
class hypsecant_gen(rv_continuous):
"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is::
hypsecant.pdf(x) = 1/pi * sech(x)
%(example)s
"""
def _pdf(self, x):
return 1.0/(pi*cosh(x))
def _cdf(self, x):
return 2.0/pi*arctan(exp(x))
def _ppf(self, q):
return log(tan(pi*q/2.0))
def _stats(self):
return 0, pi*pi/4, 0, 2
def _entropy(self):
return log(2*pi)
hypsecant = hypsecant_gen(name='hypsecant')
## Gauss Hypergeometric
class gausshyper_gen(rv_continuous):
"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is::
gausshyper.pdf(x, a, b, c, z) =
C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and
``C = 1 / (B(a,b) F[2,1](c, a; a+b; -z))``
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c==c) & (z==z)
def _pdf(self, x, a, b, c, z):
Cinv = gam(a)*gam(b)/gam(a+b)*special.hyp2f1(c,a,a+b,-z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = special.beta(n+a,b) / special.beta(a,b)
num = special.hyp2f1(c,a+n,a+b+n,-z)
den = special.hyp2f1(c,a,a+b,-z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper',
shapes="a, b, c, z")
## Inverted Gamma
# special case of generalized gamma with c=-1
#
class invgamma_gen(rv_continuous):
"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is::
invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
for x > 0, a > 0.
%(example)s
"""
def _pdf(self, x, a):
return exp(self._logpdf(x,a))
def _logpdf(self, x, a):
return (-(a+1)*log(x)-gamln(a) - 1.0/x)
def _cdf(self, x, a):
return 1.0-special.gammainc(a, 1.0/x)
def _ppf(self, q, a):
return 1.0/special.gammaincinv(a,1-q)
def _munp(self, n, a):
return exp(gamln(a-n) - gamln(a))
def _entropy(self, a):
return a - (a+1.0)*special.psi(a) + gamln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma', shapes='a')
## Inverse Gaussian Distribution (used to be called 'invnorm')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is::
invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for ``x > 0``.
When `mu` is too small, evaluating the cumulative density function will be
inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for ``mu <= 0.0028``.
%(example)s
"""
def _rvs(self, mu):
return mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = norm.cdf(fac*(x-mu)/mu)
C1 += exp(1.0/mu) * norm.cdf(-fac*(x+mu)/mu) * exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss', shapes="mu")
## Inverted Weibull
class invweibull_gen(rv_continuous):
"""An inverted Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is::
invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
xc1 = x**(-c-1.0)
#xc2 = xc1*x
xc2 = x**(-c)
xc2 = exp(-xc2)
return c*xc1*xc2
def _cdf(self, x, c):
xc1 = x**(-c)
return exp(-xc1)
def _ppf(self, q, c):
return pow(-log(q),arr(-1.0/c))
def _entropy(self, c):
return 1+_EULER + _EULER / c - log(c)
invweibull = invweibull_gen(a=0, name='invweibull', shapes='c')
## Johnson SB
class johnsonsb_gen(rv_continuous):
"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is::
johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
for ``0 < x < 1``, where ``a`` is any real number, ``b > 0``,
and ``phi`` is the normal pdf.
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a==a)
def _pdf(self, x, a, b):
trm = norm.pdf(a+b*log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0/(1+exp(-1.0/b*(norm.ppf(q)-a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb', shapes="a, b")
## Johnson SU
class johnsonsu_gen(rv_continuous):
"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is::
johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
phi(a + b * log(x + sqrt(x**2 + 1)))
for all ``x``, where ``a`` is any real number, ``b > 0``,
and `phi` is the normal pdf.
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a==a)
def _pdf(self, x, a, b):
x2 = x*x
trm = norm.pdf(a+b*log(x+sqrt(x2+1)))
return b*1.0/sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x+sqrt(x*x+1)))
def _ppf(self, q, a, b):
return sinh((norm.ppf(q)-a)/b)
johnsonsu = johnsonsu_gen(name='johnsonsu', shapes="a, b")
## Laplace Distribution
class laplace_gen(rv_continuous):
"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is::
laplace.pdf(x) = 1/2 * exp(-abs(x))
%(example)s
"""
def _rvs(self):
return mtrand.laplace(0, 1, size=self._size)
def _pdf(self, x):
return 0.5*exp(-abs(x))
def _cdf(self, x):
return where(x > 0, 1.0-0.5*exp(-x), 0.5*exp(x))
def _ppf(self, q):
return where(q > 0.5, -log(2*(1-q)), log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return log(2)+1
laplace = laplace_gen(name='laplace')
## Levy Distribution
class levy_gen(rv_continuous):
"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is::
levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
for ``x > 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=1.
%(example)s
"""
def _pdf(self, x):
return 1/sqrt(2*pi*x)/x*exp(-1/(2*x))
def _cdf(self, x):
return 2*(1-norm._cdf(1/sqrt(x)))
def _ppf(self, q):
val = norm._ppf(1-q/2.0)
return 1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy = levy_gen(a=0.0,name="levy")
## Left-skewed Levy Distribution
class levy_l_gen(rv_continuous):
"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is::
levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
for ``x < 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=-1.
%(example)s
"""
def _pdf(self, x):
ax = abs(x)
return 1/sqrt(2*pi*ax)/ax*exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2*norm._cdf(1/sqrt(ax))-1
def _ppf(self, q):
val = norm._ppf((q+1.0)/2)
return -1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
## Levy-stable Distribution (only random variates)
class levy_stable_gen(rv_continuous):
"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
Levy-stable distribution. Only random variates (``rvs``) are implemented;
the pdf and the other generic methods are not available.
%(example)s
"""
def _rvs(self, alpha, beta):
sz = self._size
TH = uniform.rvs(loc=-pi/2.0,scale=pi,size=sz)
W = expon.rvs(size=sz)
if alpha==1:
return 2/pi*(pi/2+beta*TH)*tan(TH)-beta*log((pi/2*W*cos(TH))/(pi/2+beta*TH))
# else
ialpha = 1.0/alpha
aTH = alpha*TH
if beta==0:
return W/(cos(TH)/tan(aTH)+sin(TH))*((cos(aTH)+sin(aTH)*tan(TH))/W)**ialpha
# else
val0 = beta*tan(pi*alpha/2)
th0 = arctan(val0)/alpha
val3 = W/(cos(TH)/tan(alpha*(th0+TH))+sin(TH))
res3 = val3*((cos(aTH)+sin(aTH)*tan(TH)-val0*(sin(aTH)-cos(aTH)*tan(TH)))/W)**ialpha
return res3
def _argcheck(self, alpha, beta):
if beta == -1:
self.b = 0.0
elif beta == 1:
self.a = 0.0
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _pdf(self, x, alpha, beta):
raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable', shapes="alpha, beta")
## Logistic (special case of generalized logistic with c=1)
## Sech-squared
class logistic_gen(rv_continuous):
"""A logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is::
logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
%(example)s
"""
def _rvs(self):
return mtrand.logistic(size=self._size)
def _pdf(self, x):
ex = exp(-x)
return ex / (1+ex)**2.0
def _cdf(self, x):
return 1.0/(1+exp(-x))
def _ppf(self, q):
return -log(1.0/q-1)
def _stats(self):
return 0, pi*pi/3.0, 0, 6.0/5.0
def _entropy(self):
# differential entropy of the standard logistic distribution
return 2.0
logistic = logistic_gen(name='logistic')
## Log Gamma
#
class loggamma_gen(rv_continuous):
"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is::
loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
for all real ``x`` and ``c > 0``.
%(example)s
"""
def _rvs(self, c):
return log(mtrand.gamma(c, size=self._size))
def _pdf(self, x, c):
return exp(c*x-exp(x)-gamln(c))
def _cdf(self, x, c):
return special.gammainc(c, exp(x))
def _ppf(self, q, c):
return log(special.gammaincinv(c,q))
def _munp(self,n,*args):
# use generic moment calculation using ppf
return self._mom0_sc(n,*args)
loggamma = loggamma_gen(name='loggamma', shapes='c')
## Log-Laplace (Log Double Exponential)
##
class loglaplace_gen(rv_continuous):
"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is::
loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
= c / 2 * x**(-c-1), for x >= 1
for ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
cd2 = c/2.0
c = where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _entropy(self, c):
return log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace', shapes='c')
## Lognormal (Cobb-Douglas)
## The shape parameter is the standard deviation of the underlying
## normal distribution; the mean of the underlying normal is log(scale).
class lognorm_gen(rv_continuous):
"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is::
lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
for ``x > 0``, ``s > 0``.
If log x is normally distributed with mean mu and variance sigma**2,
then x is log-normally distributed with shape parameter sigma and scale
parameter exp(mu).
%(example)s
"""
def _rvs(self, s):
return exp(s * norm.rvs(size=self._size))
def _pdf(self, x, s):
Px = exp(-log(x)**2 / (2*s**2))
return Px / (s*x*sqrt(2*pi))
def _cdf(self, x, s):
return norm.cdf(log(x)/s)
def _ppf(self, q, s):
return exp(s*norm._ppf(q))
def _stats(self, s):
p = exp(s*s)
mu = sqrt(p)
mu2 = p*(p-1)
g1 = sqrt((p-1))*(2+p)
g2 = numpy.polyval([1,2,3,0,-6.0],p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5*(1+log(2*pi)+2*log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm', shapes='s')
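# A minimal illustrative sketch of the parametrization described in the
# docstring (mu and sigma below are arbitrary demonstration values): a
# lognormal with underlying normal N(mu, sigma**2) has shape ``s = sigma``
# and ``scale = exp(mu)``.
#
# >>> import numpy as np
# >>> from scipy.stats import lognorm, norm
# >>> mu, sigma = 1.5, 0.75
# >>> x = 2.0
# >>> np.allclose(lognorm.pdf(x, sigma, scale=np.exp(mu)),
# ...             norm.pdf(np.log(x), loc=mu, scale=sigma) / x)
# True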
# Gibrat's distribution is just lognormal with s=1
class gilbrat_gen(lognorm_gen):
"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is::
gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
%(example)s
"""
def _rvs(self):
return lognorm_gen._rvs(self, 1.0)
def _pdf(self, x):
return lognorm_gen._pdf(self, x, 1.0)
def _cdf(self, x):
return lognorm_gen._cdf(self, x, 1.0)
def _ppf(self, q):
return lognorm_gen._ppf(self, q, 1.0)
def _stats(self):
return lognorm_gen._stats(self, 1.0)
def _entropy(self):
return 0.5*log(2*pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
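# A minimal illustrative check (arbitrary evaluation point): `gilbrat` is
# the lognormal with shape ``s = 1``.
#
# >>> import numpy as np
# >>> from scipy.stats import gilbrat, lognorm
# >>> x = 2.4
# >>> np.allclose(gilbrat.pdf(x), lognorm.pdf(x, 1.0))
# True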
# MAXWELL
class maxwell_gen(rv_continuous):
"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
and given ``scale = 1.0 / sqrt(a)``, where a is the parameter used in
the Mathworld description [1]_.
The probability density function for `maxwell` is::
maxwell.pdf(x) = sqrt(2/pi) * x**2 * exp(-x**2/2)
for ``x > 0``.
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0,size=self._size)
def _pdf(self, x):
return sqrt(2.0/pi)*x*x*exp(-x*x/2.0)
def _cdf(self, x):
return special.gammainc(1.5,x*x/2.0)
def _ppf(self, q):
return sqrt(2*special.gammaincinv(1.5,q))
def _stats(self):
val = 3*pi-8
return 2*sqrt(2.0/pi), 3-8/pi, sqrt(2)*(32-10*pi)/val**1.5, \
(-12*pi*pi + 160*pi - 384) / val**2.0
def _entropy(self):
return _EULER + 0.5*log(2*pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
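# A minimal illustrative check (arbitrary evaluation point): `maxwell` is a
# `chi` distribution with ``df = 3`` at unit scale; for the Mathworld
# parameter ``a`` one would pass ``scale=1.0/sqrt(a)``.
#
# >>> import numpy as np
# >>> from scipy.stats import maxwell, chi
# >>> x = 1.1
# >>> np.allclose(maxwell.pdf(x), chi.pdf(x, 3))
# True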
# Mielke's Beta-Kappa
class mielke_gen(rv_continuous):
"""A Mielke's Beta-Kappa continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is::
mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
for ``x > 0``.
%(example)s
"""
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q,s*1.0/k)
return pow(qsk/(1.0-qsk),1.0/s)
mielke = mielke_gen(a=0.0, name='mielke', shapes="k, s")
# Nakagami (cf Chi)
class nakagami_gen(rv_continuous):
"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is::
nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
x**(2*nu-1) * exp(-nu*x**2)
for ``x > 0``, ``nu > 0``.
%(example)s
"""
def _pdf(self, x, nu):
return 2*nu**nu/gam(nu)*(x**(2*nu-1.0))*exp(-nu*x*x)
def _cdf(self, x, nu):
return special.gammainc(nu,nu*x*x)
def _ppf(self, q, nu):
return sqrt(1.0/nu*special.gammaincinv(nu,q))
def _stats(self, nu):
mu = gam(nu+0.5)/gam(nu)/sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu*(1-4*nu*mu2)/2.0/nu/mu2**1.5
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami", shapes='nu')
# Non-central chi-squared
# nc is lambda of definition, df is nu
class ncx2_gen(rv_continuous):
"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is::
ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
* I[(df-2)/2](sqrt(nc*x))
for ``x > 0``.
%(example)s
"""
def _rvs(self, df, nc):
return mtrand.noncentral_chisquare(df,nc,self._size)
def _logpdf(self, x, df, nc):
a = arr(df/2.0)
fac = -nc/2.0 - x/2.0 + (a-1)*np.log(x) - a*np.log(2) - special.gammaln(a)
return fac + np.nan_to_num(np.log(special.hyp0f1(a, nc * x/4.0)))
def _pdf(self, x, df, nc):
return np.exp(self._logpdf(x, df, nc))
def _cdf(self, x, df, nc):
return special.chndtr(x,df,nc)
def _ppf(self, q, df, nc):
return special.chndtrix(q,df,nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return df + nc, 2*val, sqrt(8)*(val+nc)/val**1.5, \
12.0*(val+2*nc)/val**2.0
ncx2 = ncx2_gen(a=0.0, name='ncx2', shapes="df, nc")
# Non-central F
class ncf_gen(rv_continuous):
"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is::
ncf.pdf(x, df1, df2, nc) = exp(-nc/2 + nc*df1*x/(2*(df1*x+df2)))
* df1**(df1/2) * df2**(df2/2) * x**(df1/2-1)
* (df2+df1*x)**(-(df1+df2)/2)
* gamma(df1/2)*gamma(1+df2/2)
* L^{df1/2-1}^{df2/2}(-nc*df1*x/(2*(df1*x+df2)))
/ (B(df1/2, df2/2) * gamma((df1+df2)/2))
for ``df1, df2, nc > 0``.
%(example)s
"""
def _rvs(self, dfn, dfd, nc):
return mtrand.noncentral_f(dfn,dfd,nc,self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
n1,n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + gamln(n1/2.)+gamln(1+n2/2.)
term -= gamln((n1+n2)/2.0)
Px = exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= special.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)),n2/2,n1/2-1)
Px /= special.beta(n1/2,n2/2)
#this function does not have a return
# drop it for now, the generic function seems to work ok
def _cdf(self, x, dfn, dfd, nc):
return special.ncfdtr(dfn,dfd,nc,x)
def _ppf(self, q, dfn, dfd, nc):
return special.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn *1.0/dfd)**n
term = gamln(n+0.5*dfn) + gamln(0.5*dfd-n) - gamln(dfd*0.5)
val *= exp(-nc / 2.0+term)
val *= special.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = where(dfd <= 2, inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = where(dfd <=4, inf, 2*(dfd*1.0/dfn)**2.0 * \
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) / \
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf', shapes="dfn, dfd, nc")
## Student t distribution
class t_gen(rv_continuous):
"""A Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is::
t.pdf(x, df) = gamma((df+1)/2) /
(sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2))
for ``df > 0``.
%(example)s
"""
def _rvs(self, df):
return mtrand.standard_t(df, size=self._size)
#Y = f.rvs(df, df, size=self._size)
#sY = sqrt(Y)
#return 0.5*sqrt(df)*(sY-1.0/sY)
def _pdf(self, x, df):
r = arr(df*1.0)
Px = exp(gamln((r+1)/2)-gamln(r/2))
Px /= sqrt(r*pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = gamln((r+1)/2)-gamln(r/2)
lPx -= 0.5*log(r*pi) + (r+1)/2*log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return special.stdtr(df, x)
def _sf(self, x, df):
return special.stdtr(df, -x)
def _ppf(self, q, df):
return special.stdtrit(df, q)
def _isf(self, q, df):
return -special.stdtrit(df, q)
def _stats(self, df):
mu2 = where(df > 2, df / (df-2.0), inf)
g1 = where(df > 3, 0.0, nan)
g2 = where(df > 4, 6.0/(df-4.0), nan)
return 0, mu2, g1, g2
t = t_gen(name='t', shapes="df")
## Non-central T distribution
class nct_gen(rv_continuous):
"""A non-central Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nct` is::
nct.pdf(x, df, nc) = df**(df/2) * gamma(df+1) /
(2**df * exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2))
for ``df > 0``, ``nc > 0``.
%(example)s
"""
def _rvs(self, df, nc):
return norm.rvs(loc=nc,size=self._size)*sqrt(df) / sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*log(n) + gamln(n+1)
trm1 -= n*log(2)+nc*nc/2.+(n/2.)*log(fac1)+gamln(n/2.)
Px = exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = sqrt(2)*nc*x*special.hyp1f1(n/2+1,1.5,valF)
trm1 /= arr(fac1*special.gamma((n+1)/2))
trm2 = special.hyp1f1((n+1)/2,0.5,valF)
trm2 /= arr(sqrt(fac1)*special.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return special.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return special.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
val1 = gam((df-1.0)/2.0)
val2 = gam(df/2.0)
if 'm' in moments:
mu = nc*sqrt(df/2.0)*val1/val2
if 'v' in moments:
var = (nc*nc+1.0)*df/(df-2.0)
var -= nc*nc*df* val1**2 / 2.0 / val2**2
mu2 = var
if 's' in moments:
g1n = 2*nc*sqrt(df)*val1*((nc*nc*(2*df-7)-3)*val2**2 \
-nc*nc*(df-2)*(df-3)*val1**2)
g1d = (df-3)*sqrt(2*df*(nc*nc+1)/(df-2) - \
nc*nc*df*(val1/val2)**2) * val2 * \
(nc*nc*(df-2)*val1**2 - \
2*(nc*nc+1)*val2**2)
g1 = g1n/g1d
if 'k' in moments:
g2n = 2*(-3*nc**4*(df-2)**2 *(df-3) *(df-4)*val1**4 + \
2**(6-2*df) * nc*nc*(df-2)*(df-4)* \
(nc*nc*(2*df-7)-3)*pi* gam(df+1)**2 - \
4*(nc**4*(df-5)-6*nc*nc-3)*(df-3)*val2**4)
g2d = (df-3)*(df-4)*(nc*nc*(df-2)*val1**2 - \
2*(nc*nc+1)*val2)**2
g2 = g2n / g2d
return mu, mu2, g1, g2
nct = nct_gen(name="nct", shapes="df, nc")
# Pareto
class pareto_gen(rv_continuous):
"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is::
pareto.pdf(x, b) = b / x**(b+1)
for ``x >= 1``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = extract(mask,b)
mu = valarray(shape(b),value=inf)
place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = extract( mask,b)
mu2 = valarray(shape(b), value=inf)
place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = extract( mask,b)
g1 = valarray(shape(b), value=nan)
vals = 2*(bt+1.0)*sqrt(bt-2.0)/((bt-3.0)*sqrt(bt))
place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = extract( mask,b)
g2 = valarray(shape(b), value=nan)
vals = 6.0*polyval([1.0,1.0,-6,-2],bt)/ \
polyval([1.0,-7.0,12.0,0.0],bt)
place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - log(c)
pareto = pareto_gen(a=1.0, name="pareto", shapes="b")
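# A minimal illustrative check (the shape value is an arbitrary demonstration
# input): the density matches ``b / x**(b+1)`` and the mean is ``b/(b-1)``
# for ``b > 1``, as used in ``_stats``.
#
# >>> import numpy as np
# >>> from scipy.stats import pareto
# >>> b = 3.0
# >>> np.allclose(pareto.pdf(2.0, b), b / 2.0**(b+1))
# True
# >>> np.allclose(pareto.mean(b), b / (b - 1.0))
# True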
# LOMAX (Pareto of the second kind.)
class lomax_gen(rv_continuous):
"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The Lomax distribution is a special case of the Pareto distribution, with
``loc = -1.0``.
The probability density function for `lomax` is::
lomax.pdf(x, c) = c / (1+x)**(c+1)
for ``x >= 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return log(c) - (c+1)*log(1+x)
def _cdf(self, x, c):
return 1.0-1.0/(1.0+x)**c
def _sf(self, x, c):
return 1.0/(1.0+x)**c
def _logsf(self, x, c):
return -c*log(1+x)
def _ppf(self, q, c):
return pow(1.0-q,-1.0/c)-1
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-log(c)
lomax = lomax_gen(a=0.0, name="lomax", shapes="c")
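# A minimal illustrative check (arbitrary inputs): per the docstring,
# `lomax` is `pareto` shifted by ``loc = -1.0``.
#
# >>> import numpy as np
# >>> from scipy.stats import lomax, pareto
# >>> x, c = 0.8, 2.5
# >>> np.allclose(lomax.pdf(x, c), pareto.pdf(x, c, loc=-1.0))
# True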
## Power-function distribution
## Special case of beta dist. with d =1.0
class powerlaw_gen(rv_continuous):
"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is::
powerlaw.pdf(x, a) = a * x**(a-1)
for ``0 <= x <= 1``, ``a > 0``.
%(example)s
"""
def _pdf(self, x, a):
return a*x**(a-1.0)
def _logpdf(self, x, a):
return log(a) + (a-1)*log(x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return a/(a+1.0), a*(a+2.0)/(a+1.0)**2, \
2*(1.0-a)*sqrt((a+2.0)/(a*(a+3.0))), \
6*polyval([1,-1,-6,2],a)/(a*(a+3.0)*(a+4))
def _entropy(self, a):
return 1 - 1.0/a - log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw", shapes="a")
# Power log normal
class powerlognorm_gen(rv_continuous):
"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is::
powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
(Phi(-log(x)/s))**(c-1),
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``s, c > 0``.
%(example)s
"""
def _pdf(self, x, c, s):
return c/(x*s)*norm.pdf(log(x)/s)*pow(norm.cdf(-log(x)/s),c*1.0-1.0)
def _cdf(self, x, c, s):
return 1.0 - pow(norm.cdf(-log(x)/s),c*1.0)
def _ppf(self, q, c, s):
return exp(-s*norm.ppf(pow(1.0-q,1.0/c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm", shapes="c, s")
# Power Normal
class powernorm_gen(rv_continuous):
"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is::
powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``c > 0`` (``x`` is any real number).
%(example)s
"""
def _pdf(self, x, c):
return c*_norm_pdf(x)* \
(_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -norm.ppf(pow(1.0-q,1.0/c))
powernorm = powernorm_gen(name='powernorm', shapes="c")
# R-distribution (a general-purpose distribution with a
# variety of shapes).
# FIXME: PPF does not work.
class rdist_gen(rv_continuous):
"""An R-distributed continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is::
rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
for ``-1 <= x <= 1``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return np.power((1.0-x*x),c/2.0-1) / special.beta(0.5,c/2.0)
def _cdf_skip(self, x, c):
#error inspecial.hyp2f1 for some values see tickets 758, 759
return 0.5 + x/special.beta(0.5,c/2.0)* \
special.hyp2f1(0.5,1.0-c/2.0,1.5,x*x)
def _munp(self, n, c):
return (1-(n % 2))*special.beta((n+1.0)/2,c/2.0)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist", shapes="c")
# Rayleigh distribution (this is chi with df=2 and loc=0.0)
# scale is the mode.
class rayleigh_gen(rv_continuous):
"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is::
rayleigh.pdf(r) = r * exp(-r**2/2)
for ``r >= 0``.
%(example)s
"""
def _rvs(self):
return chi.rvs(2,size=self._size)
def _pdf(self, r):
return r*exp(-r*r/2.0)
def _cdf(self, r):
return 1.0-exp(-r*r/2.0)
def _ppf(self, q):
return sqrt(-2*log(1-q))
def _stats(self):
val = 4-pi
return np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5, \
6*pi/val-16/val**2
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
# Reciprocal Distribution
class reciprocal_gen(rv_continuous):
"""A reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `reciprocal` is::
reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
for ``a <= x <= b``, ``a, b > 0``.
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = log(b*1.0 / a)
return (a > 0) & (b > 0) & (b > a)
def _pdf(self, x, a, b):
# argcheck should be called before _pdf
return 1.0/(x*self.d)
def _logpdf(self, x, a, b):
return -log(x) - log(self.d)
def _cdf(self, x, a, b):
return (log(x)-log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a,q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0,n) - pow(a*1.0,n))
def _entropy(self,a,b):
return 0.5*log(a*b)+log(log(b/a))
reciprocal = reciprocal_gen(name="reciprocal", shapes="a, b")
# Rice distribution
# FIXME: PPF does not work.
class rice_gen(rv_continuous):
"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is::
rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
for ``x > 0``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
return x*exp(-(x*x+b*b)/2.0)*special.i0(x*b)
def _logpdf(self, x, b):
return log(x) - (x*x + b*b)/2.0 + log(special.i0(x*b))
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1+nd2
b2 = b*b/2.0
return 2.0**(nd2)*exp(-b2)*special.gamma(n1) * \
special.hyp1f1(n1,1,b2)
rice = rice_gen(a=0.0, name="rice", shapes="b")
# Reciprocal Inverse Gaussian
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is::
recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
for ``x >= 0``.
%(example)s
"""
def _rvs(self, mu): #added, taken from invgauss
return 1.0/mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x)*exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*log(2*pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-exp(2.0/mu)*_norm_cdf(-isqx*trm2)
# xb=50 or something large is necessary for stats to converge without exception
recipinvgauss = recipinvgauss_gen(a=0.0, xb=50, name='recipinvgauss',
shapes="mu")
# Semicircular
class semicircular_gen(rv_continuous):
"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is::
semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
for ``-1 <= x <= 1``.
%(example)s
"""
def _pdf(self, x):
return 2.0/pi*sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/pi*(x*sqrt(1-x*x) + arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
# Triangular
class triang_gen(rv_continuous):
"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
to ``(loc+scale)``.
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c):
return mtrand.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
return where(x < c, 2*x/c, 2*(1-x)/(1-c))
def _cdf(self, x, c):
return where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
def _ppf(self, q, c):
return where(q < c, sqrt(c*q), 1-sqrt((1-c)*(1-q)))
def _stats(self, c):
return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
(5*(1.0-c+c*c)**1.5), -3.0/5.0
def _entropy(self,c):
return 0.5-log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang", shapes="c")
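# A minimal illustrative sketch of the parametrization described in the
# docstring (c, loc and scale below are arbitrary demonstration values):
# the density peaks at ``loc + c*scale`` with height ``2/scale``.
#
# >>> import numpy as np
# >>> from scipy.stats import triang
# >>> c, loc, scale = 0.25, 2.0, 4.0
# >>> np.allclose(triang.pdf(loc + c*scale, c, loc=loc, scale=scale), 2.0/scale)
# True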
# Truncated Exponential
class truncexpon_gen(rv_continuous):
"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is::
truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
for ``0 < x < b``.
%(example)s
"""
def _argcheck(self, b):
self.b = b
return (b > 0)
def _pdf(self, x, b):
return exp(-x)/(1-exp(-b))
def _logpdf(self, x, b):
return -x - log(1-exp(-b))
def _cdf(self, x, b):
return (1.0-exp(-x))/(1-exp(-b))
def _ppf(self, q, b):
return -log(1-q+q*exp(-b))
def _munp(self, n, b):
#wrong answer with formula, same as in continuous.pdf
#return gam(n+1)-special.gammainc(1+n,b)
if n == 1:
return (1-(b+1)*exp(-b))/(-expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*exp(-b))/(-expm1(-b))
else:
#return generic for higher moments
#return rv_continuous._mom1_sc(self,n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = exp(b)
return log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon', shapes="b")
# Truncated Normal
class truncnorm_gen(rv_continuous):
"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a,b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._delta = self._nb - self._na
self._logdelta = log(self._delta)
return (a != b)
# All of these assume that _argcheck is called first
# and no other thread calls _pdf before.
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
return norm._ppf(q*self._nb + self._na*(1.0-q))
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d #correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm', shapes="a, b")
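# A minimal illustrative sketch of the clip-value conversion described in
# the docstring (my_mean, my_std and the clip points are arbitrary
# demonstration values).
#
# >>> import numpy as np
# >>> from scipy.stats import truncnorm
# >>> my_mean, my_std = 5.0, 2.0
# >>> myclip_a, myclip_b = 3.0, 9.0
# >>> a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
# >>> rv = truncnorm(a, b, loc=my_mean, scale=my_std)
# >>> np.allclose([rv.cdf(myclip_a), rv.cdf(myclip_b)], [0.0, 1.0])
# True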
# Tukey-Lambda
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
- Cauchy (lam=-1)
- logistic (lam=0.0)
- approx Normal (lam=0.14)
- u-shape (lam = 0.5)
- uniform from -1 to 1 (lam = 1)
%(example)s
"""
def _argcheck(self, lam):
# lam in RR.
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = arr(special.tklmbda(x,lam))
Px = Fx**(lam-1.0) + (arr(1-Fx))**(lam-1.0)
Px = 1.0/arr(Px)
return where((lam <= 0) | (abs(x) < 1.0/arr(lam)), Px, 0.0)
def _cdf(self, x, lam):
return special.tklmbda(x, lam)
def _ppf(self, q, lam):
q = q*1.0
vals1 = (q**lam - (1-q)**lam)/lam
vals2 = log(q/(1-q))
return where((lam == 0)&(q==q), vals2, vals1)
def _stats(self, lam):
mu2 = 2*gam(lam+1.5)-lam*pow(4,-lam)*sqrt(pi)*gam(lam)*(1-2*lam)
mu2 /= lam*lam*(1+2*lam)*gam(1+1.5)
mu4 = 3*gam(lam)*gam(lam+0.5)*pow(2,-2*lam) / lam**3 / gam(2*lam+1.5)
mu4 += 2.0/lam**4 / (1+4*lam)
mu4 -= 2*sqrt(3)*gam(lam)*pow(2,-6*lam)*pow(3,3*lam) * \
gam(lam+1.0/3)*gam(lam+2.0/3) / (lam**3.0 * gam(2*lam+1.5) * \
gam(lam+0.5))
g2 = mu4 / mu2 / mu2 - 3.0
return 0, mu2, 0, g2
def _entropy(self, lam):
def integ(p):
return log(pow(p,lam-1)+pow(1-p,lam-1))
return integrate.quad(integ,0,1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda', shapes="lam")
# Uniform
class uniform_gen(rv_continuous):
"""A uniform continuous random variable.
This distribution is constant between `loc` and ``loc + scale``.
%(before_notes)s
%(example)s
"""
def _rvs(self):
return mtrand.uniform(0.0,1.0,self._size)
def _pdf(self, x):
return 1.0*(x==x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
# Von-Mises
# if x is not in range or loc is not in range it assumes they are angles
# and converts them to [-pi, pi] equivalents.
eps = numpy.finfo(float).eps
class vonmises_gen(rv_continuous):
"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
If `x` is not in range or `loc` is not in range it assumes they are angles
and converts them to [-pi, pi] equivalents.
The probability density function for `vonmises` is::
vonmises.pdf(x, b) = exp(b*cos(x)) / (2*pi*I[0](b))
for ``-pi <= x <= pi``, ``b > 0``.
%(example)s
"""
def _rvs(self, b):
return mtrand.vonmises(0.0, b, size=self._size)
def _pdf(self, x, b):
return exp(b*cos(x)) / (2*pi*special.i0(b))
def _cdf(self, x, b):
return vonmises_cython.von_mises_cdf(b,x)
def _stats_skip(self, b):
return 0, None, 0, None
vonmises = vonmises_gen(name='vonmises', shapes="b")
## Wald distribution (Inverse Normal with shape parameter mu=1.0)
class wald_gen(invgauss_gen):
"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is::
wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
for ``x > 0``.
%(example)s
"""
def _rvs(self):
return mtrand.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
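# A minimal illustrative check (arbitrary evaluation point): `wald` is
# `invgauss` with ``mu = 1``.
#
# >>> import numpy as np
# >>> from scipy.stats import wald, invgauss
# >>> x = 0.9
# >>> np.allclose(wald.pdf(x), invgauss.pdf(x, 1.0))
# True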
# Wrapped Cauchy
class wrapcauchy_gen(rv_continuous):
"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is::
wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
for ``0 <= x <= 2*pi``, ``0 < c < 1``.
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
return (1.0-c*c)/(2*pi*(1+c*c-2*c*cos(x)))
def _cdf(self, x, c):
output = 0.0*x
val = (1.0+c)/(1.0-c)
c1 = x<pi
c2 = 1-c1
xp = extract( c1,x)
#valp = extract(c1,val)
xn = extract( c2,x)
#valn = extract(c2,val)
if (any(xn)):
valn = extract(c2, np.ones_like(x)*val)
xn = 2*pi - xn
yn = tan(xn/2.0)
on = 1.0-1.0/pi*arctan(valn*yn)
place(output, c2, on)
if (any(xp)):
valp = extract(c1, np.ones_like(x)*val)
yp = tan(xp/2.0)
op = 1.0/pi*arctan(valp*yp)
place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*arctan(val*tan(pi*q))
rcmq = 2*pi-2*arctan(val*tan(pi*(1-q)))
return where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return log(2*pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*pi, name='wrapcauchy', shapes="c")
### DISCRETE DISTRIBUTIONS
###
def entropy(pk,qk=None):
"""S = entropy(pk,qk=None)
calculate the entropy of a distribution given the p_k values
S = -sum(pk * log(pk), axis=0)
If qk is not None, then compute a relative entropy
S = sum(pk * log(pk / qk), axis=0)
Routine will normalize pk and qk if they don't sum to 1
"""
pk = arr(pk)
pk = 1.0* pk / sum(pk,axis=0)
if qk is None:
vec = where(pk == 0, 0.0, pk*log(pk))
else:
qk = arr(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / sum(qk,axis=0)
# If qk is zero anywhere, then unless pk is zero at those places
# too, the relative entropy is infinite.
if any(take(pk,nonzero(qk==0.0),axis=0)!=0.0, 0):
return inf
vec = where (pk == 0, 0.0, -pk*log(pk / qk))
return -sum(vec,axis=0)
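# A minimal illustrative sketch (the probabilities below are arbitrary
# demonstration values): unnormalized weights are normalized internally,
# and the relative entropy of identical distributions is zero.
#
# >>> import numpy as np
# >>> from scipy.stats import entropy
# >>> pk = [2.0, 1.0, 1.0]                    # normalized to [0.5, 0.25, 0.25]
# >>> np.allclose(entropy(pk), 1.5 * np.log(2))
# True
# >>> np.allclose(entropy([0.5, 0.5], [0.5, 0.5]), 0.0)
# True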
## Handlers for generic case where xk and pk are given
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk>xk),axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals>=q),axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = arr(n)
return sum(self.xk**n[newaxis,...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = arr(t)
return sum(exp(self.xk * t[newaxis,...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
'''non-central moment of discrete distribution'''
#many changes, originally not even a return
tot = 0.0
diff = 1e100
#pos = self.a
pos = max(0.0, 1.0*self.a)
count = 0
#handle cases with infinite support
ulimit = max(1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
llimit = min(-1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
while (pos <= self.b) and ((pos <= ulimit) or \
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
# use pmf because _pmf does not check support in randint
# and there might be problems with the correct self.a, self.b at this stage
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = -self.inc
while (pos >= self.a) and ((pos >= llimit) or \
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
#using pmf instead of _pmf, see above
tot += diff
pos -= self.inc
count += 1
return tot
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.invcdf_b
a = self.invcdf_a
if isinf(b): # Be sure ending point is > q
b = max(100*q,10)
while 1:
if b >= self.b: qb = 1.0; break
qb = self._cdf(b,*args)
if (qb < q): b += 10
else: break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = min(-100*q,-10)
while 1:
if a <= self.a: qb = 0.0; break
qa = self._cdf(a,*args)
if (qa > q): a -= 10
else: break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b == a+1:
#testcase: return wrong number at lower index
#python -c "from scipy.stats import zipf;print zipf.ppf(0.01,2)" wrong
#python -c "from scipy.stats import zipf;print zipf.ppf([0.01,0.61,0.77,0.83],2)"
#python -c "from scipy.stats import logser;print logser.ppf([0.1,0.66, 0.86,0.93],0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
a = c
qa = qc
elif (qc > q):
b = c
qb = qc
else:
return c
def reverse_dict(dict):
newdict = {}
sorted_keys = copy(dict.keys())
sorted_keys.sort()
for key in sorted_keys[::-1]:
newdict[dict[key]] = key
return newdict
def make_dict(keys, values):
d = {}
for key, value in zip(keys, values):
d[key] = value
return d
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class for constructing specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and the corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments
values : tuple of two array_like
(xk, pk) where xk are points (integers) with positive probability pk
with sum(pk) = 1
inc : integer
increment for the support of the distribution, default: 1
other values have not been tested
badvalue : object, optional
The value in (masked) arrays that indicates a value that should be
ignored.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the first two arguments for all
its methods.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
generic.rvs(<shape(s)>, loc=0, size=1)
random variates
generic.pmf(x, <shape(s)>, loc=0)
probability mass function
generic.logpmf(x, <shape(s)>, loc=0)
log of the probability mass function
generic.cdf(x, <shape(s)>, loc=0)
cumulative distribution function
generic.logcdf(x, <shape(s)>, loc=0)
log of the cumulative distribution function
generic.sf(x, <shape(s)>, loc=0)
survival function (1-cdf --- sometimes more accurate)
generic.logsf(x, <shape(s)>, loc=0)
log of the survival function
generic.ppf(q, <shape(s)>, loc=0)
percent point function (inverse of cdf --- percentiles)
generic.isf(q, <shape(s)>, loc=0)
inverse survival function (inverse of sf)
generic.moment(n, <shape(s)>, loc=0)
non-central n-th moment of the distribution. May not work for array arguments.
generic.stats(<shape(s)>, loc=0, moments='mv')
mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k')
generic.entropy(<shape(s)>, loc=0)
entropy of the RV
generic.fit(data, <shape(s)>, loc=0)
Parameter estimates for generic data
generic.expect(func=None, args=(), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
generic.median(<shape(s)>, loc=0)
Median of the distribution.
generic.mean(<shape(s)>, loc=0)
Mean of the distribution.
generic.std(<shape(s)>, loc=0)
Standard deviation of the distribution.
generic.var(<shape(s)>, loc=0)
Variance of the distribution.
generic.interval(alpha, <shape(s)>, loc=0)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
generic(<shape(s)>, loc=0)
calling a distribution instance returns a frozen distribution
Notes
-----
Alternatively, the object may be called (as a function) to fix
the shape and location parameters returning a
"frozen" discrete RV object:
myrv = generic(<shape(s)>, loc=0)
- frozen RV object with the same methods but holding the given shape
and location fixed.
You can construct an arbitrary discrete rv where P{X=xk} = pk
by passing to the rv_discrete initialization method (through the
``values=`` keyword) a tuple of sequences (xk, pk) which describes only those
values of X (xk) that occur with nonzero probability (pk).
To create a new discrete distribution, we would do the following::
class poisson_gen(rv_discrete):
"Poisson distribution"
def _pmf(self, k, mu):
...
and create an instance
poisson = poisson_gen(name="poisson", shapes="mu", longname='A Poisson')
The docstring can be created from a template.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> numargs = generic.numargs
>>> [ <shape(s)> ] = ['Replace with reasonable value', ]*numargs
Display frozen pmf:
>>> rv = generic(<shape(s)>)
>>> x = np.arange(0, np.minimum(rv.dist.b, 3)+1)
>>> h = plt.plot(x, rv.pmf(x))
Check accuracy of cdf and ppf:
>>> prb = generic.cdf(x, <shape(s)>)
>>> h = plt.semilogy(np.abs(x-generic.ppf(prb, <shape(s)>))+1e-20)
Random number generation:
>>> R = generic.rvs(<shape(s)>, size=100)
Custom made discrete distribution:
>>> vals = [arange(7), (0.1, 0.2, 0.3, 0.1, 0.1, 0.1, 0.1)]
>>> custm = rv_discrete(name='custm', values=vals)
>>> h = plt.plot(vals[0], custm.pmf(vals[0]))
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8,values=None,inc=1,longname=None,
shapes=None, extradoc=None):
super(rv_generic,self).__init__()
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.a = a
self.b = b
self.invcdf_a = a # what's the difference to self.a, .b
self.invcdf_b = b
self.name = name
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = sgf(self._cdfsingle,otypes='d')
self.return_integers = 1
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
self.extradoc = extradoc
if values is not None:
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
self.xk = take(ravel(self.xk),indx, 0)
self.pk = take(ravel(self.pk),indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = make_dict(self.xk, self.pk)
self.qvals = numpy.cumsum(self.pk,axis=0)
self.F = make_dict(self.xk, self.qvals)
self.Finv = reverse_dict(self.F)
self._ppf = instancemethod(sgf(_drv_ppf,otypes='d'),
self, rv_discrete)
self._pmf = instancemethod(sgf(_drv_pmf,otypes='d'),
self, rv_discrete)
self._cdf = instancemethod(sgf(_drv_cdf,otypes='d'),
self, rv_discrete)
self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
self.generic_moment = instancemethod(_drv_moment,
self, rv_discrete)
self.moment_gen = instancemethod(_drv_moment_gen,
self, rv_discrete)
self.numargs=0
else:
cdf_signature = inspect.getargspec(self._cdf.im_func)
numargs1 = len(cdf_signature[0]) - 2
pmf_signature = inspect.getargspec(self._pmf.im_func)
numargs2 = len(pmf_signature[0]) - 2
self.numargs = max(numargs1, numargs2)
#nin correction needs to be after we know numargs
#correct nin for generic moment vectorization
self.vec_generic_moment = sgf(_drv2_moment, otypes='d')
self.vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(self.vec_generic_moment,
self, rv_discrete)
#correct nin for ppf vectorization
_vppf = sgf(_drv2_ppfsingle,otypes='d')
_vppf.nin = self.numargs + 2 # +1 is for self
self._vecppf = instancemethod(_vppf,
self, rv_discrete)
#now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
# generate docstring for subclass instances
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the rv_discrete template."""
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s discrete random variable.'%longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict_discrete.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _rvs(self, *args):
return self._ppf(mtrand.random_sample(self._size),*args)
def _nonzero(self, k, *args):
return floor(k)==k
def _argcheck(self, *args):
cond = 1
for arg in args:
cond &= (arg > 0)
return cond
def _pmf(self, k, *args):
return self._cdf(k,*args) - self._cdf(k-1,*args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdfsingle(self, k, *args):
m = arange(int(self.a),k+1)
return sum(self._pmf(m,*args),axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._vecppf(q, *args)
def _isf(self, q, *args):
return self._ppf(1-q,*args)
def _stats(self, *args):
return None, None, None, None
def _munp(self, n, *args):
return self.generic_moment(n, *args)
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
size : int or tuple of ints, optional
defining number of random variates (default=1)
Returns
-------
rvs : array_like
random variates of given `size`
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k,*args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0) + np.isnan(k),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._pmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k,*args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0) + np.isnan(k),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function at k of the given RV
Parameters
----------
k : array_like, int
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
Returns
-------
cdf : array_like
Cumulative distribution function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2*(cond0==cond0), 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV
Parameters
----------
k : array_like, int
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2*(cond0==cond0), 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,k,*args,**kwds):
"""
Survival function (1-cdf) at k of the given RV
Parameters
----------
k : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
Returns
-------
sf : array_like
Survival function evaluated at k
"""
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,k,*args,**kwds):
"""
Log of the survival function (1-cdf) at k of the given RV
Parameters
----------
k : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
Returns
-------
sf : array_like
Survival function evaluated at k
"""
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale: array_like, optional
scale parameter (default=1)
Returns
-------
k : array_like
quantile corresponding to the lower tail probability, q.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
q,loc = map(arr,(q,loc))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.badvalue,typecode='d')
        #output type 'd' to handle nan and inf
place(output,(q==0)*(cond==cond), self.a-1)
place(output,cond2,self.b)
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function (1-sf) at q of the given RV
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
Returns
-------
k : array_like
quantile corresponding to the upper tail probability, q.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
q,loc = map(arr,(q,loc))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
#old:
## output = valarray(shape(cond),value=self.b,typecode='d')
## #typecode 'd' to handle nin and inf
## place(output,(1-cond0)*(cond1==cond1), self.badvalue)
## place(output,cond2,self.a-1)
#same problem as with ppf
# copied from ppf and changed
output = valarray(shape(cond),value=self.badvalue,typecode='d')
        #output type 'd' to handle nan and inf
place(output,(q==0)*(cond==cond), self.b)
place(output,cond2,self.a-1)
# call place only if at least 1 valid argument
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._isf(*goodargs) + loc) #PB same as ticket 766
if output.ndim == 0:
return output[()]
return output
def stats(self, *args, **kwds):
"""
Some statistics of the given discrete RV
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
moments : string, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
loc,moments=map(kwds.get,['loc','moments'])
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None: # loc is given without keyword
loc = args[-1]
            if N == self.numargs + 2 and moments is None: # loc and moments given without keyword
loc, moments = args[-2:]
args = args[:self.numargs]
if loc is None: loc = 0.0
if moments is None: moments = 'mv'
loc = arr(loc)
args = tuple(map(arr,args))
cond = self._argcheck(*args) & (loc==loc)
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*(mu2**1.5)
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
goodargs = argsreduce(cond, *(args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
place(out0,cond,mu+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
out0 = default.copy()
place(out0,cond,mu2)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / mu2**1.5
out0 = default.copy()
place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0,cond,g2)
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def moment(self, n, *args, **kwds): # Non-central moments in standard form.
"""
n'th non-central moment of the distribution
Parameters
----------
n: int, n>=1
order of moment
arg1, arg2, arg3,...: float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : float, optional
location parameter (default=0)
scale : float, optional
scale parameter (default=1)
"""
loc = kwds.get('loc', 0)
scale = kwds.get('scale', 1)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0): raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
dict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
dict = {}
mu, mu2, g1, g2 = self._stats(*args,**dict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def freeze(self, *args, **kwds):
return rv_frozen(self, *args, **kwds)
def _entropy(self, *args):
if hasattr(self,'pk'):
return entropy(self.pk)
else:
mu = int(self.stats(*args, **{'moments':'m'}))
val = self.pmf(mu,*args)
if (val==0.0): ent = 0.0
else: ent = -val*log(val)
k = 1
term = 1.0
while (abs(term) > eps):
val = self.pmf(mu+k,*args)
if val == 0.0: term = 0.0
else: term = -val * log(val)
val = self.pmf(mu-k,*args)
if val != 0.0: term -= val*log(val)
k += 1
ent += term
return ent
def entropy(self, *args, **kwds):
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
loc = arr(loc)
args = map(arr,args)
cond0 = self._argcheck(*args) & (loc==loc)
output = zeros(shape(cond0),'d')
place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
place(output,cond0,self.vecentropy(*goodargs))
return output
def __call__(self, *args, **kwds):
return self.freeze(*args,**kwds)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False):
"""calculate expected value of a function with respect to the distribution
for discrete distribution
Parameters
----------
        func : function (default: identity mapping)
Function for which sum is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
optional keyword parameters
lb, ub : numbers
lower and upper bound for integration, default is set to the support
            of the distribution, lb and ub are inclusive (lb <= k <= ub)
conditional : boolean (False)
If true then the expectation is corrected by the conditional
probability of the integration interval. The return value is the
expectation of the function, conditional on being in the given
            interval (k such that lb <= k <= ub).
Returns
-------
expected value : float
Notes
-----
* function is not vectorized
        * accuracy: uses self.moment_tol as stopping criterion
for heavy tailed distribution e.g. zipf(4), accuracy for
mean, variance in example is only 1e-5,
increasing precision (moment_tol) makes zipf very slow
* suppnmin=100 internal parameter for minimum number of points to evaluate
could be added as keyword parameter, to evaluate functions with
non-monotonic shapes, points include integers in (-suppnmin, suppnmin)
* uses maxcount=1000 limits the number of points that are evaluated
to break loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative integers
are evaluated)
"""
#moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
#avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 #minimum number of points to evaluate (+ and -)
if func is None:
def fun(x):
#loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
#loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint
# and there might be problems(?) with correct self.a, self.b at this stage
# maybe not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc #convert bound for standardized distribution
if ub is None:
ub = (self.b)
else:
ub = ub - loc #convert bound for standardized distribution
if conditional:
if np.isposinf(ub)[()]:
#work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
invfac = 1 - self.cdf(lb-1,*args)
else:
invfac = 1 - self.cdf(lb-1,*args) - self.sf(ub,*args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) #check limits
#print 'low, upp', low, upp
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
#handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while (pos >= lb) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
# fixme: replace with proper warning
print 'sum did not converge'
return tot/invfac
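# --- Illustrative sketch (editorial addition, not part of the original module) ---
# The rv_discrete docstring above explains how an arbitrary discrete rv with
# P{X=xk} = pk can be built through the ``values=`` keyword.  The helper below
# is a hedged, minimal example of that pattern; its name and the chosen
# support/probabilities are assumptions made purely for illustration.
def _example_custom_discrete():
    """Build a three-point distribution and query a few of its methods."""
    import numpy as np
    xk = np.array([0, 1, 5])
    pk = np.array([0.5, 0.3, 0.2])
    custom = rv_discrete(name='custom_example', values=(xk, pk))
    mean = custom.mean()         # 0*0.5 + 1*0.3 + 5*0.2 = 1.3
    cdf_at_1 = custom.cdf(1)     # 0.5 + 0.3 = 0.8
    sample = custom.rvs(size=4)  # four variates drawn from {0, 1, 5}
    return mean, cdf_at_1, sample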
# Binomial
class binom_gen(rv_discrete):
def _rvs(self, n, pr):
return mtrand.binomial(n,pr,self._size)
def _argcheck(self, n, pr):
self.b = n
return (n>=0) & (pr >= 0) & (pr <= 1)
def _logpmf(self, x, n, pr):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) +
gamln(n-k+1)))
return combiln + k*np.log(pr) + (n-k)*np.log(1-pr)
def _pmf(self, x, n, pr):
return exp(self._logpmf(x, n, pr))
def _cdf(self, x, n, pr):
k = floor(x)
vals = special.bdtr(k,n,pr)
return vals
def _sf(self, x, n, pr):
k = floor(x)
return special.bdtrc(k,n,pr)
def _ppf(self, q, n, pr):
vals = ceil(special.bdtrik(q,n,pr))
vals1 = vals-1
temp = special.bdtr(vals1,n,pr)
return where(temp >= q, vals1, vals)
def _stats(self, n, pr):
q = 1.0-pr
mu = n * pr
var = n * pr * q
g1 = (q-pr) / sqrt(n*pr*q)
g2 = (1.0-6*pr*q)/(n*pr*q)
return mu, var, g1, g2
def _entropy(self, n, pr):
k = r_[0:n+1]
vals = self._pmf(k,n,pr)
lvals = where(vals==0,0.0,log(vals))
return -sum(vals*lvals,axis=0)
binom = binom_gen(name='binom',shapes="n, pr",extradoc="""
Binomial distribution
Counts the number of successes in *n* independent
trials when the probability of success each time is *pr*.
binom.pmf(k,n,p) = choose(n,k)*p**k*(1-p)**(n-k)
for k in {0,1,...,n}
""")
# Bernoulli distribution
class bernoulli_gen(binom_gen):
def _rvs(self, pr):
return binom_gen._rvs(self, 1, pr)
def _argcheck(self, pr):
return (pr >=0 ) & (pr <= 1)
def _logpmf(self, x, pr):
return binom._logpmf(x, 1, pr)
def _pmf(self, x, pr):
return binom._pmf(x, 1, pr)
def _cdf(self, x, pr):
return binom._cdf(x, 1, pr)
def _sf(self, x, pr):
return binom._sf(x, 1, pr)
def _ppf(self, q, pr):
return binom._ppf(q, 1, pr)
def _stats(self, pr):
return binom._stats(1, pr)
def _entropy(self, pr):
return -pr*log(pr)-(1-pr)*log(1-pr)
bernoulli = bernoulli_gen(b=1,name='bernoulli',shapes="pr",extradoc="""
Bernoulli distribution
1 if binary experiment succeeds, 0 otherwise. Experiment
succeeds with probability *pr*.
bernoulli.pmf(k,p) = 1-p if k = 0
= p if k = 1
for k = 0,1
"""
)
# Negative binomial
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
Probability mass function, given by
``np.choose(k+n-1, n-1) * p**n * (1-p)**k`` for ``k >= 0``.
%(example)s
"""
def _rvs(self, n, pr):
return mtrand.negative_binomial(n, pr, self._size)
def _argcheck(self, n, pr):
return (n >= 0) & (pr >= 0) & (pr <= 1)
def _pmf(self, x, n, pr):
coeff = exp(gamln(n+x) - gamln(x+1) - gamln(n))
return coeff * power(pr,n) * power(1-pr,x)
def _logpmf(self, x, n, pr):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(pr) + x*log(1-pr)
def _cdf(self, x, n, pr):
k = floor(x)
return special.betainc(n, k+1, pr)
def _sf_skip(self, x, n, pr):
#skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k,n,pr)
def _ppf(self, q, n, pr):
vals = ceil(special.nbdtrik(q,n,pr))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1,n,pr)
return where(temp >= q, vals1, vals)
def _stats(self, n, pr):
Q = 1.0 / pr
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom', shapes="n, pr", extradoc="""
Negative binomial distribution
nbinom.pmf(k,n,p) = choose(k+n-1,n-1) * p**n * (1-p)**k
for k >= 0.
"""
)
## Geometric distribution
class geom_gen(rv_discrete):
def _rvs(self, pr):
return mtrand.geometric(pr,size=self._size)
def _argcheck(self, pr):
return (pr<=1) & (pr >= 0)
def _pmf(self, k, pr):
return (1-pr)**(k-1) * pr
def _logpmf(self, k, pr):
        return (k-1)*log(1-pr) + log(pr)
def _cdf(self, x, pr):
k = floor(x)
return (1.0-(1.0-pr)**k)
def _sf(self, x, pr):
k = floor(x)
return (1.0-pr)**k
def _ppf(self, q, pr):
vals = ceil(log(1.0-q)/log(1-pr))
temp = 1.0-(1.0-pr)**(vals-1)
return where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, pr):
mu = 1.0/pr
qr = 1.0-pr
var = qr / pr / pr
g1 = (2.0-pr) / sqrt(qr)
g2 = numpy.polyval([1,-6,6],pr)/(1.0-pr)
return mu, var, g1, g2
geom = geom_gen(a=1,name='geom', longname="A geometric",
shapes="pr", extradoc="""
Geometric distribution
geom.pmf(k,p) = (1-p)**(k-1)*p
for k >= 1
"""
)
## Hypergeometric distribution
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
    for N - (M-n) <= k <= min(n, N)
%(example)s
"""
def _rvs(self, M, n, N):
return mtrand.hypergeometric(n,M-n,N,size=self._size)
def _argcheck(self, M, n, N):
cond = rv_discrete._argcheck(self,M,n,N)
cond &= (n <= M) & (N <= M)
self.a = N-(M-n)
self.b = min(n,N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
#same as the following but numerically more precise
#return comb(good,k) * comb(bad,N-k) / comb(tot,N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
tot, good = M, n
n = good*1.0
m = (tot-good)*1.0
N = N*1.0
tot = m+n
p = n/tot
mu = N*p
var = m*n*N*(tot-N)*1.0/(tot*tot*(tot-1))
g1 = (m - n)*(tot-2*N) / (tot-2.0)*sqrt((tot-1.0)/(m*n*N*(tot-N)))
m2, m3, m4, m5 = m**2, m**3, m**4, m**5
        n2, n3, n4, n5 = n**2, n**3, n**4, n**5
g2 = m3 - m5 + n*(3*m2-6*m3+m4) + 3*m*n2 - 12*m2*n2 + 8*m3*n2 + n3 \
- 6*m*n3 + 8*m2*n3 + m*n4 - n5 - 6*m3*N + 6*m4*N + 18*m2*n*N \
- 6*m3*n*N + 18*m*n2*N - 24*m2*n2*N - 6*n3*N - 6*m*n3*N \
+ 6*n4*N + N*N*(6*m2 - 6*m3 - 24*m*n + 12*m2*n + 6*n2 + \
12*m*n2 - 6*n3)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = r_[N-(M-n):min(n,N)+1]
vals = self.pmf(k,M,n,N)
lvals = where(vals==0.0,0.0,log(vals))
return -sum(vals*lvals,axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom', shapes="M, n, N")
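# --- Illustrative sketch (editorial addition, not part of the original module) ---
# The hypergeom docstring above describes the urn model: M objects in total,
# n of Type I, and N drawn without replacement.  The helper below is a hedged
# reading of that model; the function name and the card-drawing numbers are
# assumptions made purely for illustration.
def _example_hypergeom_draw():
    """Probability of k Type I objects for M=52, n=13, N=5 (hearts in a 5-card hand)."""
    M, n, N = 52, 13, 5
    k = 2
    p_exactly_two = hypergeom.pmf(k, M, n, N)  # choose(13,2)*choose(39,3)/choose(52,5)
    p_at_most_two = hypergeom.cdf(k, M, n, N)
    return p_exactly_two, p_at_most_two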
## Logarithmic (Log-Series), (Series) distribution
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
def _rvs(self, pr):
# looks wrong for pr>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return mtrand.logseries(pr,size=self._size)
def _argcheck(self, pr):
return (pr > 0) & (pr < 1)
def _pmf(self, k, pr):
return -pr**k * 1.0 / k / log(1-pr)
def _stats(self, pr):
r = log(1-pr)
mu = pr / (pr - 1.0) / r
mu2p = -pr / r / (pr-1.0)**2
var = mu2p - mu*mu
mu3p = -pr / r * (1.0+pr) / (1.0-pr)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / var**1.5
mu4p = -pr / r * (1.0/(pr-1)**2 - 6*pr/(pr-1)**3 + \
6*pr*pr / (pr-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1,name='logser', longname='A logarithmic',
shapes='pr', extradoc="""
Logarithmic (Log-Series, Series) distribution
logser.pmf(k,p) = - p**k / (k*log(1-p))
for k >= 1
"""
)
## Poisson distribution
class poisson_gen(rv_discrete):
def _rvs(self, mu):
return mtrand.poisson(mu, self._size)
def _pmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return exp(Pk)
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k,mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k,mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q,mu))
vals1 = vals-1
temp = special.pdtr(vals1,mu)
return where((temp >= q), vals1, vals)
def _stats(self, mu):
var = mu
g1 = 1.0/arr(sqrt(mu))
g2 = 1.0 / arr(mu)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson',
shapes="mu", extradoc="""
Poisson distribution
poisson.pmf(k, mu) = exp(-mu) * mu**k / k!
for k >= 0
"""
)
## (Planck) Discrete Exponential
class planck_gen(rv_discrete):
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = inf
return 1
elif (lambda_ < 0):
self.a = -inf
self.b = 0
return 1
return 0 # lambda_ = 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck',longname='A discrete exponential ',
shapes="lamda",
extradoc="""
Planck (Discrete Exponential)
planck.pmf(k,b) = (1-exp(-b))*exp(-b*k)
for k*b >= 0
"""
)
class boltzmann_gen(rv_discrete):
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',longname='A truncated discrete exponential ',
shapes="lamda, N",
extradoc="""
Boltzmann (Truncated Discrete Exponential)
boltzmann.pmf(k,b,N) = (1-exp(-b))*exp(-b*k)/(1-exp(-b*N))
for k=0,..,N-1
"""
)
## Discrete Uniform
class randint_gen(rv_discrete):
def _argcheck(self, min, max):
self.a = min
self.b = max-1
return (max > min)
def _pmf(self, k, min, max):
fact = 1.0 / (max - min)
return fact
def _cdf(self, x, min, max):
k = floor(x)
return (k-min+1)*1.0/(max-min)
def _ppf(self, q, min, max):
vals = ceil(q*(max-min)+min)-1
vals1 = (vals-1).clip(min, max)
temp = self._cdf(vals1, min, max)
return where(temp >= q, vals1, vals)
def _stats(self, min, max):
m2, m1 = arr(max), arr(min)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d-1)*(d+1.0)/12.0
g1 = 0.0
        g2 = -6.0/5.0*(d*d+1.0)/((d-1.0)*(d+1.0))
return mu, var, g1, g2
def _rvs(self, min, max=None):
"""An array of *size* random integers >= min and < max.
If max is None, then range is >=0 and < min
"""
return mtrand.randint(min, max, self._size)
def _entropy(self, min, max):
return log(max-min)
randint = randint_gen(name='randint',longname='A discrete uniform '\
'(random integer)', shapes="min, max",
extradoc="""
Discrete Uniform
Random integers >=min and <max.
randint.pmf(k,min, max) = 1/(max-min)
for min <= k < max.
"""
)
# Zipf distribution
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
def _rvs(self, a):
return mtrand.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / arr(special.zeta(a,1) * k**a)
return Pk
def _munp(self, n, a):
return special.zeta(a-n,1) / special.zeta(a,1)
def _stats(self, a):
sv = errp(0)
fac = arr(special.zeta(a,1))
mu = special.zeta(a-1.0,1)/fac
mu2p = special.zeta(a-2.0,1)/fac
var = mu2p - mu*mu
mu3p = special.zeta(a-3.0,1)/fac
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / arr(var**1.5)
mu4p = special.zeta(a-4.0,1)/fac
sv = errp(sv)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / arr(var**2) - 3.0
return mu, var, g1, g2
zipf = zipf_gen(a=1,name='zipf', longname='A Zipf',
shapes="a", extradoc="""
Zipf distribution
zipf.pmf(k,a) = 1/(zeta(a)*k**a)
for k >= 1
"""
)
# Discrete Laplacian
class dlaplace_gen(rv_discrete):
def _pmf(self, k, a):
return tanh(a/2.0)*exp(-a*abs(k))
def _cdf(self, x, a):
k = floor(x)
ind = (k >= 0)
const = exp(a)+1
return where(ind, 1.0-exp(-a*k)/const, exp(a*(k+1))/const)
def _ppf(self, q, a):
const = 1.0/(1+exp(-a))
cons2 = 1+exp(a)
ind = q < const
vals = ceil(where(ind, log(q*cons2)/a-1, -log((1-q)*cons2)/a))
vals1 = (vals-1)
temp = self._cdf(vals1, a)
return where(temp >= q, vals1, vals)
def _stats_skip(self, a):
        # variance mu2 does not agree with sample variance,
# nor with direct calculation using pmf
# remove for now because generic calculation works
# except it does not show nice zeros for mean and skew(?)
ea = exp(-a)
e2a = exp(-2*a)
e3a = exp(-3*a)
e4a = exp(-4*a)
mu2 = 2* (e2a + ea) / (1-ea)**3.0
mu4 = 2* (e4a + 11*e3a + 11*e2a + ea) / (1-ea)**5.0
return 0.0, mu2, 0.0, mu4 / mu2**2.0 - 3
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-inf,
name='dlaplace', longname='A discrete Laplacian',
shapes="a", extradoc="""
Discrete Laplacian distribution.
dlaplace.pmf(k,a) = tanh(a/2) * exp(-a*abs(k))
for a > 0.
"""
)
class skellam_gen(rv_discrete):
def _rvs(self, mu1, mu2):
n = self._size
return np.random.poisson(mu1, n)-np.random.poisson(mu2, n)
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0, ncx2.pdf(2*mu2, 2*(1-x), 2*mu1)*2,
ncx2.pdf(2*mu1, 2*(x+1), 2*mu2)*2)
#ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = np.floor(x)
px = np.where(x < 0, ncx2.cdf(2*mu2, -2*x, 2*mu1),
1-ncx2.cdf(2*mu1, 2*(x+1), 2*mu2))
return px
# enable later
## def _cf(self, w, mu1, mu2):
## # characteristic function
## poisscf = poisson._cf
## return poisscf(w, mu1) * poisscf(-w, mu2)
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / np.sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam',
shapes="mu1,mu2", extradoc="""
Skellam distribution
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, k1-k2 follows a Skellam distribution with
parameters mu1 = lam1 - rho*sqrt(lam1*lam2) and
mu2 = lam2 - rho*sqrt(lam1*lam2), where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then rho = 0.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
"""
)
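# --- Illustrative sketch (editorial addition, not part of the original module) ---
# The extradoc above defines a Skellam variable as the difference of two
# Poisson counts.  For the independent case (rho = 0) its mean is lam1 - lam2
# and its variance lam1 + lam2.  The helper below is a hedged check of that
# statement; the function name and the chosen rates are assumptions made
# purely for illustration.
def _example_skellam_moments():
    lam1, lam2 = 10.0, 4.0
    mean, var = skellam.stats(lam1, lam2, moments='mv')
    return mean, var  # expected: 6.0 and 14.0 (up to floating point)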
|
gpl-3.0
|
l11x0m7/Paper
|
Sig2Mes/baseline/code/demodulator.py
|
1
|
12889
|
# -*- encoding=utf-8 -*-
import sys
import numpy as np
from collections import defaultdict
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter, MaxNLocator
# M = 2; Fs = 8Hz; Fc = 16Hz; Nsamp=32
def ask_demodulator(y, M, Fs, Fc, Nsamp):
assert M == 2
y = np.asarray(y)
t_sin = 1. / Fs / Nsamp * np.arange(0, Nsamp)
carrier = np.sin(2 * np.pi * Fc * t_sin)
if y.ndim == 1:
y = y.reshape((1, -1))
m, n = y.shape
res = np.zeros((m, n / Nsamp))
for symbol in xrange(0, m):
for i in xrange(0, n, Nsamp):
z = np.sum(y[symbol, i:i+Nsamp] * carrier) / float(Fc)
if z > 0.5:
res[symbol, (i+Nsamp-1) / Nsamp] = 1
return res
# M = 2; freq_sep = 4; Nsamp = 32; Fs = 8; Fc = 16
def fsk_demodulator(y, M, freq_sep, Nsamp, Fs, Fc):
assert M == 2
y = np.asarray(y)
f1 = Fc - freq_sep
f2 = Fc + freq_sep
t_sin = 1. / Fs / Nsamp * np.arange(0, Nsamp)
carrier1 = np.cos(2*np.pi*f1*t_sin)
carrier2 = np.cos(2*np.pi*f2*t_sin)
if y.ndim == 1:
y = y.reshape((1, -1))
m, n = y.shape
res = np.zeros((m, n / Nsamp))
for symbol in xrange(0, m):
for i in xrange(0, n, Nsamp):
z1 = abs(np.sum(y[symbol,i:i+Nsamp] * carrier1) / Fc)
z2 = abs(np.sum(y[symbol,i:i+Nsamp] * carrier2) / Fc)
if z1 >= z2:
res[symbol, (i+Nsamp-1) / Nsamp] = 1
elif z1 < z2:
res[symbol, (i+Nsamp-1) / Nsamp] = 0
return res
# M = 2; Fs = 8Hz; Fc = 16Hz; Nsamp=32
def psk_demodulator(y, M, Fs, Fc, Nsamp):
assert M == 2
y = np.asarray(y)
t_sin = 1. / Fs / Nsamp * np.arange(0, Nsamp)
carrier = np.sin(2 * np.pi * Fc * t_sin)
if y.ndim == 1:
y = y.reshape((1, -1))
m, n = y.shape
res = np.zeros((m, n / Nsamp))
for symbol in xrange(0, m):
for i in xrange(0, n, Nsamp):
z = np.sum(y[symbol, i:i+Nsamp] * carrier) / Fc
if z < 0:
res[symbol, (i+Nsamp-1) / Nsamp] = 1
return res
def ser(y1, y2):
return float(np.sum(np.not_equal(y1, y2))) / np.prod(np.shape(y1))
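# Illustrative sketch (editorial addition, not part of the original script).
# The demodulators above correlate each Nsamp-long chunk of the waveform with
# a reference carrier.  The helper below is a hedged round-trip example built
# to match psk_demodulator's convention (bit 0 -> +carrier, bit 1 -> -carrier);
# the function name and the test bit pattern are assumptions for illustration.
def demo_bpsk_roundtrip(Fs=8, Fc=16, Nsamp=32):
    bits = np.array([[0, 1, 1, 0, 1]])
    t_sin = 1. / Fs / Nsamp * np.arange(0, Nsamp)
    carrier = np.sin(2 * np.pi * Fc * t_sin)
    # map each bit onto +carrier / -carrier and concatenate the chunks
    waveform = np.hstack([carrier if b == 0 else -carrier for b in bits[0]])
    recovered = psk_demodulator(waveform, 2, Fs, Fc, Nsamp)
    return ser(bits, recovered)  # expected to be 0.0 on this noiseless channel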
def test_2ask():
signals = list()
messages = list()
with open('../../data/10dB/2ask_20000.txt', 'rb') as fr:
for line in fr:
items = line.strip().split('\t')
signal = map(float, items[0].split(','))
message = map(int, items[1].split(','))
signals.append(signal)
messages.append(message)
signals = np.asarray(signals)
messages = np.asarray(messages)
demod_signals = ask_demodulator(signals, 2, 8, 16, 32)
ber_value = ser(messages, demod_signals)
print(ber_value)
def test_2fsk():
signals = list()
messages = list()
with open('../../data/10dB/2fsk_20000.txt', 'rb') as fr:
for line in fr:
items = line.strip().split('\t')
signal = map(float, items[0].split(','))
message = map(int, items[1].split(','))
signals.append(signal)
messages.append(message)
signals = np.asarray(signals)
messages = np.asarray(messages)
demod_signals = fsk_demodulator(signals, 2, 4, 32, 8, 16)
ber_value = ser(messages, demod_signals)
print(ber_value)
def test_bpsk():
signals = list()
messages = list()
with open('../../data/10dB/bpsk_20000.txt', 'rb') as fr:
for line in fr:
items = line.strip().split('\t')
signal = map(float, items[0].split(','))
message = map(int, items[1].split(','))
signals.append(signal)
messages.append(message)
signals = np.asarray(signals)
messages = np.asarray(messages)
demod_signals = psk_demodulator(signals, 2, 8, 16, 32)
ber_value = ser(messages, demod_signals)
print(ber_value)
def test_mix_signals():
    # correlation demodulation results
# res = defaultdict(dict)
# x_label = set()
# with open('../../data/mix_data/mix_-10_20_210000.txt', 'rb') as fr:
# for line in fr:
# items = line.strip().split('\t')
# signal = map(float, items[0].split(','))
# message = map(int, items[1].split(','))
# snr, act_snr, st = float(items[2]), float(items[3]), items[4]
# x_label.add((snr, act_snr))
# res[st].setdefault(snr, [0, 0])
# if st == '2ASK':
# demod_signal = ask_demodulator(signal, 2, 8, 16, 32)
# res[st][snr][0] += np.sum(np.not_equal(demod_signal, message))
# res[st][snr][1] += np.prod(demod_signal.shape)
# elif st == '2FSK':
# demod_signal = fsk_demodulator(signal, 2, 4, 32, 8, 16)
# res[st][snr][0] += np.sum(np.not_equal(demod_signal, message))
# res[st][snr][1] += np.prod(demod_signal.shape)
# elif st == 'BPSK':
# demod_signal = psk_demodulator(signal, 2, 8, 16, 32)
# res[st][snr][0] += np.sum(np.not_equal(demod_signal, message))
# res[st][snr][1] += np.prod(demod_signal.shape)
# else:
# raise ValueError('Wrong signal type!')
x_label = set([(20.0, -3.1), (-10.0, -15.1), (-5.0, -10.6), (0.0, -7.0), (10.0, -3.6), (5.0, -4.7), (15.0, -3.2)])
res= {'BPSK': {0.0: [59935, 320000],
5.0: [47299, 320000],
10.0: [41405, 320000],
15.0: [39995, 320000],
20.0: [40074, 320000],
-10.0: [84622, 320000],
-5.0: [73237, 320000]},
'2ASK': {0.0: [93918, 320000],
5.0: [88687, 320000],
10.0: [84886, 320000],
15.0: [82097, 320000],
20.0: [80837, 320000],
-10.0: [107327, 320000],
-5.0: [100795, 320000]},
'2FSK': {0.0: [107452, 320000],
5.0: [111889, 320000],
10.0: [118582, 320000],
15.0: [119862, 320000],
20.0: [119546, 320000],
-10.0: [131699, 320000],
-5.0: [114997, 320000]}}
x_label = sorted(list(x_label), key=lambda k:k[0])
x_label = {float(r[0]):str(list(r)) for r in x_label}
def format_fn(tick_val, tick_pos):
if int(tick_val) in x_label:
return x_label[int(tick_val)]
else:
return ''
plt.figure()
ax = plt.subplot(111)
ax.xaxis.set_major_formatter(FuncFormatter(format_fn))
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
st_list = list()
compare_models = ''
for i, st in enumerate(res):
if st == '2ASK':
st_label = 'correlation demodulation of ' + 'BASK'
elif st == '2FSK':
st_label = 'correlation demodulation of ' + 'BFSK'
else:
st_label = 'correlation demodulation of ' + st
if i == 0:
if compare_models == '':
compare_models += 'correlation demodulation'
else:
compare_models += ' and correlation demodulation'
st_list.append(st_label)
snr2ser = dict()
for snr in res[st]:
ser = res[st][snr][0] / float(res[st][snr][1])
snr2ser[snr] = ser
print('{}\t{}\t{}'.format(st, snr, ser))
sers = zip(*sorted(snr2ser.iteritems(), key=lambda k:k[0]))[1]
plt.semilogy(range(-10, 25, 5), sers, ms=8, marker='*', lw=2, ls='-.', label=st_label)
    # model results (sequential model)
signal_model_snr = """
BPSK 0.0 0.129629060039
BPSK 5.0 0.0539730367793
BPSK 10.0 0.00709667487685
BPSK 15.0 0.000140625
BPSK 20.0 0.0
BPSK -10.0 0.251125562781
BPSK -5.0 0.19194828469
2ASK 0.0 0.263715277778
2ASK 5.0 0.13231180061
2ASK 10.0 0.034541868932
2ASK 15.0 0.00292721518987
2ASK 20.0 0.0
2ASK -10.0 0.345221480583
2ASK -5.0 0.335833333333
2FSK 0.0 0.148786618669
2FSK 5.0 0.0449860900354
2FSK 10.0 0.00283511722732
2FSK 15.0 0.0
2FSK 20.0 0.0
2FSK -10.0 0.388075906344
2FSK -5.0 0.27523091133
"""
# symbolic model
symbol_model_snr = """
BPSK 0.0 0.150248383166
BPSK 5.0 0.0340044882184
BPSK 10.0 0.00167436037869
BPSK 15.0 0.0
BPSK 20.0 0.0
BPSK -10.0 0.242444593687
BPSK -5.0 0.191598746082
2ASK 0.0 0.283181254221
2ASK 5.0 0.0930435596021
2ASK 10.0 0.00774805515989
2ASK 15.0 0.000249112536588
2ASK 20.0 0.0
2ASK -10.0 0.333583948123
2ASK -5.0 0.320145070887
2FSK 0.0 0.176915133308
2FSK 5.0 0.0492611606098
2FSK 10.0 0.00276938963904
2FSK 15.0 1.56067108857e-05
2FSK 20.0 0.0
2FSK -10.0 0.418979966975
2FSK -5.0 0.320570207589
"""
sequential_st_snr = defaultdict(list)
for line in signal_model_snr.strip().split('\n'):
items = line.split('\t')
sequential_st_snr[items[0]].append((float(items[1]), float(items[2])))
symbol_st_snr = defaultdict(list)
for line in symbol_model_snr.strip().split('\n'):
items = line.split('\t')
symbol_st_snr[items[0]].append((float(items[1]), float(items[2])))
for i, st in enumerate(sequential_st_snr):
if st == '2ASK':
st_label = 'sequential demodulation of ' + 'BASK'
elif st == '2FSK':
st_label = 'sequential demodulation of ' + 'BFSK'
else:
st_label = 'sequential demodulation of ' + st
st_list.append(st_label)
if i == 0:
if compare_models == '':
compare_models += 'sequential model demodulation'
else:
compare_models += ' and sequential model demodulation'
sers = zip(*sorted(sequential_st_snr[st], key=lambda k: k[0]))[1]
plt.semilogy(range(-10, 25, 5), sers, ms=6, marker='s', lw=2, ls='--', label=st_label)
for i, st in enumerate(symbol_st_snr):
if st == '2ASK':
st_label = 'symbolic demodulation of ' + 'BASK'
elif st == '2FSK':
st_label = 'symbolic demodulation of ' + 'BFSK'
else:
st_label = 'symbolic demodulation of ' + st
if i == 0:
if compare_models == '':
compare_models += 'symbolic model demodulation'
else:
compare_models += ' and symbolic model demodulation'
st_list.append(st_label)
sers = zip(*sorted(symbol_st_snr[st], key=lambda k: k[0]))[1]
plt.semilogy(range(-10, 25, 5), sers, ms=6, marker='o', lw=2, label=st_label)
# for i, st in enumerate(symbol_st_snr):
# if st == '2ASK':
# st_label = 'combined demodulation of ' + 'BASK'
# elif st == '2FSK':
# st_label = 'combined demodulation of ' + 'BFSK'
# else:
# st_label = 'combined demodulation of ' + st
# if i == 0:
# if compare_models == '':
# compare_models += 'combined model demodulation'
# else:
# compare_models += ' and combined model demodulation'
# st_list.append(st_label)
# sequential_sers = zip(*sorted(sequential_st_snr[st], key=lambda k: k[0]))[1]
# symbolic_sers = zip(*sorted(symbol_st_snr[st], key=lambda k: k[0]))[1]
# combined_sers = [min(symbolic_ser, sequential_ser)
# for (symbolic_ser, sequential_ser) in
# zip(sequential_sers, symbolic_sers)]
# plt.semilogy(range(-10, 25, 5), combined_sers, ms=6, marker='v', lw=2, ls=':', label=st_label)
plt.xticks(range(-10, 25, 5), fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel('[fake signal-to-noise ratio(dB), actual signal-to-noise ratio(dB)]', fontsize=20)
plt.ylabel('bit error ratio', fontsize=20)
# plt.title('BER comparison of {} for composite signals \n'
# 'modulated with BPSK, BASK and BFSK respectively'.format(compare_models))
plt.legend(labels=st_list, fontsize=16)
plt.show()
if __name__ == '__main__':
# test_2ask()
# test_2fsk()
# test_bpsk()
test_mix_signals()
|
apache-2.0
|
fweik/espresso
|
samples/gibbs_ensemble_socket.py
|
1
|
14927
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Simulate a Gibbs-ensemble of a Lennard-Jones fluid at a fixed temperature.
This script does the Monte-Carlo part of the Gibbs-ensemble, however for the
energy calculation of the systems, two instances of the :file:`gibbs_ensemble_client.py`
script are executed, which each run a different instance of ESPResSo.
The Gibbs-ensemble implemented in these scripts closely follows chapter 8 of
Daan Frenkel and Berend Smit, 'Understanding Molecular Simulation, Second Edition'.
All equation references noted in this script can be found there.
Due to the cutoff and shifting of the LJ-potential the simulated points in the
phase diagram differ from the long range uncut LJ-potential. The phase diagram
of the used potential can be found in 'B. Smit, J. Chem. Phys. 96 (11), 1992,
Phase diagrams of Lennard-Jones fluids'.
"""
import socket
import numpy as np
import pickle
import subprocess
import struct
import random
import matplotlib.pyplot as plt
import argparse
from espressomd import assert_features
assert_features("LENNARD_JONES")
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--seed', type=int, nargs=1)
parser.add_argument('-S', '--steps', type=int, nargs=1)
parser.add_argument('-w', '--warm_up_steps', type=int, nargs=1)
parser.add_argument('-E', '--espresso-executable', nargs=1)
parser.add_argument('-C', '--client-script', nargs=1)
parser.add_argument('-n', '--number-of-particles', type=int, nargs=1)
parser.add_argument('-l', '--box-length', type=float, nargs=1)
parser.add_argument('-T', '--temperature', type=float, nargs=1)
args = parser.parse_args()
# system parameters
seed = None
steps = 50000
warm_up_steps = 1000
# number of particles in both boxes combined
global_num_particles = 256
# starting box length
init_box_l = 7.55
# temperature
kT = 1.15
# maximum of the volume exchanged in the logarithmic space
DV_MAX = 0.5
PARTICLE_RADIUS = 0.5
# LJ-parameters
LJ_EPSILON = 1.0
LJ_SIGMA = 2.0 * PARTICLE_RADIUS
LJ_CUTOFF = 2.5
LJ_SHIFT = 0.0
# Monte-Carlo parameters
INIT_MOVE_CHANCE = 0.16
EXCHANGE_CHANCE = 0.8
VOLUME_CHANCE = 1.0 - INIT_MOVE_CHANCE - EXCHANGE_CHANCE
# socket parameters
HOST = 'localhost'
PORT = 31415
NUMBER_OF_CLIENTS = 2
# Message identifiers
MSG_START = 0
MSG_END = 1
MSG_MOVE_PART = 2
MSG_MOVE_PART_REVERT = 21
MSG_CHANGE_VOLUME = 3
MSG_EXCHANGE_PART_ADD = 4
MSG_EXCHANGE_PART_ADD_REVERT = 41
MSG_EXCHANGE_PART_REMOVE = 5
MSG_EXCHANGE_PART_REMOVE_REVERT = 51
MSG_ENERGY = 6
# script locations
espresso_executable = "../pypresso"
client_script = "./gibbs_ensemble_client.py"
if args.seed:
seed = args.seed[0]
if args.steps:
steps = args.steps[0]
if args.warm_up_steps:
warm_up_steps = args.warm_up_steps[0]
if args.espresso_executable:
espresso_executable = args.espresso_executable[0]
if args.client_script:
client_script = args.client_script[0]
if args.number_of_particles:
global_num_particles = args.number_of_particles[0]
if args.temperature:
kT = args.temperature[0]
if args.box_length:
init_box_l = args.box_length[0]
global_volume = 2.0 * init_box_l**3
class Box:
'''
Box class, which contains the data of one system and controls the
connection to the respective client.
'''
box_l = init_box_l
box_l_old = init_box_l
n_particles = int(global_num_particles / 2)
energy = 0.0
energy_corrected = 0.0
energy_old = 1.0e100
conn = 0
adr = 0
def recv_energy(self):
        '''Receive the energy data from the client.'''
msg = self.recv_data()
if msg[0] == MSG_ENERGY:
self.energy = msg[1]
return 0
else:
print("ERROR during energy recv")
return 1
def recv_data(self):
        '''Receive the data sent from the client.'''
# The first 4 bytes encode the length of the messages received.
buf = b''
while len(buf) < 4:
buf += self.conn.recv(4 - len(buf))
length = struct.unpack('!I', buf)[0]
d = self.conn.recv(length)
msg = pickle.loads(d)
        return msg
def send_data(self, data):
'''Send the data packet to the client.'''
# The first 4 bytes encode the length of the messages sent.
length = struct.pack('>I', len(data))
packet = length + data
self.conn.send(packet)
def calc_tail_correction(box, lj_epsilon, lj_sigma, lj_cutoff):
'''
Calculates the tail correction to the energies of the box.
'''
# eq 3.2.5
return 8.0 / 3.0 * np.pi * box.n_particles / box.box_l**3 * lj_epsilon * \
lj_sigma**3 * (1.0 / 3.0 * np.power(lj_cutoff / lj_sigma, -9) -
np.power(lj_cutoff / lj_sigma, -3))
def calc_shift_correction(box, lj_epsilon, lj_cutoff, lj_shift):
'''
Calculates the shift correction to the energies of the box.
'''
# difference in the potential integrated from 0 to cutoff distance
return -8.0 / 3.0 * np.pi * box.n_particles / box.box_l**3 * \
lj_epsilon * np.power(lj_cutoff, 3) * 4.0 * lj_shift
def move_particle(boxes, global_num_particles):
'''
Tries a displacement move and stores the new energy in the corresponding box
'''
if random.randint(0, global_num_particles - 1) < boxes[0].n_particles:
rand_box = 0
else:
rand_box = 1
boxes[rand_box].send_data(pickle.dumps([MSG_MOVE_PART, 0]))
boxes[rand_box].recv_energy()
return rand_box
def exchange_volume(boxes, global_volume):
'''
Tries a volume exchange move and stores the new energy in the boxes
'''
boxes[0].box_l_old = boxes[0].box_l
boxes[1].box_l_old = boxes[1].box_l
# calculate the exchanged volume
rand_box = random.randint(0, NUMBER_OF_CLIENTS - 1)
vol2 = global_volume - boxes[rand_box].box_l**3
lnvn = np.log(boxes[rand_box].box_l**3 / vol2) + \
(random.random() - 0.5) * DV_MAX
vol1 = global_volume * np.exp(lnvn) / (1 + np.exp(lnvn))
vol2 = global_volume - vol1
boxes[rand_box].box_l = np.cbrt(vol1)
boxes[rand_box].send_data(pickle.dumps(
[MSG_CHANGE_VOLUME, boxes[rand_box].box_l]))
boxes[(rand_box + 1) % 2].box_l = np.cbrt(vol2)
boxes[(rand_box + 1) % 2].send_data(pickle.dumps([MSG_CHANGE_VOLUME,
boxes[(rand_box + 1) % 2].box_l]))
boxes[rand_box].recv_energy()
boxes[(rand_box + 1) % 2].recv_energy()
return rand_box
def exchange_particle(boxes):
'''
Tries a particle exchange move and stores the new energy in the boxes
'''
rand_box = random.randint(0, 1)
if boxes[rand_box].n_particles == 0:
rand_box = (rand_box + 1) % 2
boxes[rand_box].n_particles -= 1
boxes[(rand_box + 1) % 2].n_particles += 1
boxes[rand_box].send_data(pickle.dumps([MSG_EXCHANGE_PART_REMOVE, 0]))
boxes[(rand_box + 1) %
2].send_data(pickle.dumps([MSG_EXCHANGE_PART_ADD, 0]))
boxes[rand_box].recv_energy()
boxes[(rand_box + 1) % 2].recv_energy()
return rand_box
def check_make_move(boxes, inner_potential, rand_box):
'''
Returns whether the last displacement move was valid or not and reverts the changes
if it was invalid.
'''
if random.random() > inner_potential:
boxes[rand_box].send_data(pickle.dumps([MSG_MOVE_PART_REVERT, 0]))
boxes[rand_box].recv_energy()
return False
return True
def check_exchange_volume(boxes, inner_potential):
'''
Returns whether the last volume exchange move was valid or not and reverts the changes
if it was invalid.
'''
volume_factor = \
(boxes[0].box_l**3 / boxes[0].box_l_old**3)**(boxes[0].n_particles + 1) * \
(boxes[1].box_l**3 / boxes[1].box_l_old **
3)**(boxes[1].n_particles + 1)
if random.random() > volume_factor * inner_potential:
boxes[0].send_data(pickle.dumps(
[MSG_CHANGE_VOLUME, boxes[0].box_l_old]))
boxes[0].box_l = boxes[0].box_l_old
boxes[1].send_data(pickle.dumps(
[MSG_CHANGE_VOLUME, boxes[1].box_l_old]))
boxes[1].box_l = boxes[1].box_l_old
boxes[0].recv_energy()
boxes[1].recv_energy()
return False
return True
def check_exchange_particle(boxes, inner_potential, rand_box):
'''
Returns whether the last particle exchange move was valid or not and reverts the changes
if it was invalid.
'''
exchange_factor = (boxes[rand_box].n_particles) / \
(boxes[(rand_box + 1) % 2].n_particles + 1.0) * \
boxes[(rand_box + 1) % 2].box_l**3 / boxes[rand_box].box_l**3
if random.random() > exchange_factor * inner_potential:
boxes[rand_box].n_particles += 1
boxes[(rand_box + 1) % 2].n_particles -= 1
boxes[rand_box].send_data(pickle.dumps(
[MSG_EXCHANGE_PART_REMOVE_REVERT, 0]))
boxes[(rand_box + 1) %
2].send_data(pickle.dumps([MSG_EXCHANGE_PART_ADD_REVERT, 0]))
boxes[rand_box].recv_energy()
boxes[(rand_box + 1) % 2].recv_energy()
return False
return True
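# Illustrative sketch (editorial addition, not part of the original sample).
# All three check_* functions above apply the same Metropolis-style rule:
# accept a trial move with probability min(1, prefactor * exp(-delta_E / kT)),
# where the prefactor is 1 for displacements, the volume factor for volume
# moves and the exchange factor for particle swaps.  The helper below restates
# that rule in isolation as a hedged sketch; it is not called by this script.
def metropolis_accept(delta_energy, kT, prefactor=1.0):
    acceptance = prefactor * np.exp(-delta_energy / kT)
    return random.random() <= acceptance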
# init socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(NUMBER_OF_CLIENTS)
boxes = []
random.seed(seed)
# start clients and connections
for i in range(NUMBER_OF_CLIENTS):
boxes.append(Box())
lj_arguments = ["-lj", str(LJ_EPSILON), str(LJ_SIGMA), str(LJ_CUTOFF),
str(LJ_SHIFT)]
arguments = ["-n", str(boxes[i].n_particles), "-s",
str(random.randint(0, np.iinfo(np.int32).max)), "-bl",
str(boxes[i].box_l)]
subprocess.Popen([espresso_executable] + [client_script] +
arguments + lj_arguments)
boxes[i].conn, boxes[i].adr = s.accept()
boxes[i].send_data(pickle.dumps([MSG_START, 0]))
boxes[i].recv_energy()
boxes[i].energy_old = boxes[i].energy + \
calc_tail_correction(boxes[i], LJ_EPSILON, LJ_SIGMA, LJ_CUTOFF) + \
calc_shift_correction(boxes[i], LJ_EPSILON, LJ_CUTOFF, LJ_SHIFT)
# observables
densities = [[], []]
list_indices = []
accepted_steps = 0
accepted_steps_move = 1
accepted_steps_volume = 1
accepted_steps_exchange = 1
move_steps_tried = 1
volume_steps_tried = 1
exchange_steps_tried = 1
# rand_box defines on which box the move acts on.
rand_box = 0
# only do displacements during the warm up
move_chance = 1.0
# Monte-Carlo loop
for i in range(steps):
print("\rIntegrating: {0:3.0f}%".format(
100 * i / steps), end='', flush=True)
    # warm up ends after warm_up_steps (1000 by default)
if i == warm_up_steps:
move_chance = INIT_MOVE_CHANCE
# rand defines which move to make.
rand = random.random()
# choose step and send the command to execute to the clients, then receive
# the energies.
if rand <= move_chance:
rand_box = move_particle(boxes, global_num_particles)
move_steps_tried += 1
elif rand <= move_chance + VOLUME_CHANCE:
rand_box = exchange_volume(boxes, global_volume)
volume_steps_tried += 1
else:
rand_box = exchange_particle(boxes)
exchange_steps_tried += 1
# calculate the correction energies of the lj tail and shift.
if rand <= move_chance:
boxes[rand_box].energy_corrected = boxes[rand_box].energy + \
calc_tail_correction(boxes[rand_box], LJ_EPSILON, LJ_SIGMA, LJ_CUTOFF) + \
calc_shift_correction(
boxes[rand_box],
LJ_EPSILON,
LJ_CUTOFF,
LJ_SHIFT)
boxes[(rand_box + 1) % 2].energy_corrected = \
boxes[(rand_box + 1) % 2].energy_old
else:
boxes[0].energy_corrected = boxes[0].energy + \
calc_tail_correction(boxes[0], LJ_EPSILON, LJ_SIGMA, LJ_CUTOFF) + \
calc_shift_correction(boxes[0], LJ_EPSILON, LJ_CUTOFF, LJ_SHIFT)
boxes[1].energy_corrected = boxes[1].energy + \
calc_tail_correction(boxes[1], LJ_EPSILON, LJ_SIGMA, LJ_CUTOFF) + \
calc_shift_correction(boxes[1], LJ_EPSILON, LJ_CUTOFF, LJ_SHIFT)
# test if the move will be accepted and undo the last step if it was not
# accepted.
delta_energy = boxes[0].energy_corrected + boxes[1].energy_corrected - \
boxes[0].energy_old - boxes[1].energy_old
# limitation to delta_energy to circumvent calculating the exponential of
# too large inner potentials, which could cause some error messages.
if delta_energy < -10.0 * kT:
delta_energy = -10.0 * kT
inner_potential = np.exp(-1.0 / kT * delta_energy)
accepted = True
if rand <= move_chance:
accepted = check_make_move(boxes, inner_potential, rand_box)
elif rand <= move_chance + VOLUME_CHANCE:
accepted = check_exchange_volume(boxes, inner_potential)
else:
accepted = check_exchange_particle(boxes, inner_potential, rand_box)
if accepted:
# keep the changes.
boxes[0].energy_old = boxes[0].energy_corrected
boxes[1].energy_old = boxes[1].energy_corrected
accepted_steps += 1
if rand <= move_chance:
accepted_steps_move += 1
elif rand <= move_chance + VOLUME_CHANCE:
accepted_steps_volume += 1
else:
accepted_steps_exchange += 1
densities[0].append(boxes[0].n_particles / boxes[0].box_l**3)
densities[1].append(boxes[1].n_particles / boxes[1].box_l**3)
list_indices.append(i)
print("Acceptance rate global:\t {}".format(accepted_steps / float(steps)))
print("Acceptance rate moving:\t {}".format(
accepted_steps_move / float(move_steps_tried)))
print("Acceptance rate volume exchange:\t {}".format(
accepted_steps_volume / float(volume_steps_tried)))
print("Acceptance rate particle exchange:\t {}".format(
accepted_steps_exchange / float(exchange_steps_tried)))
plt.figure()
plt.ylabel('density')
plt.xlabel('number of steps')
plt.plot(list_indices[100:], densities[0][100:], label="box 0")
plt.plot(list_indices[100:], densities[1][100:], label="box 1")
plt.legend()
plt.show()
# closing the socket
for i in range(NUMBER_OF_CLIENTS):
boxes[i].send_data(pickle.dumps([MSG_END, 0]))
s.close()
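# Illustrative sketch (assumes the recorded densities are equilibrated after the
# first ~100 samples, matching the slice used in the plot above): the coexistence
# densities could be estimated from the tails of the two series, e.g.
#   rho_low, rho_high = sorted([np.mean(densities[0][100:]),
#                               np.mean(densities[1][100:])])
#   print("estimated coexistence densities:", rho_low, rho_high)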
|
gpl-3.0
|
DGrady/pandas
|
asv_bench/benchmarks/replace.py
|
6
|
2197
|
from .pandas_vb_common import *
class replace_fillna(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
try:
self.rng = date_range('1/1/2000', periods=self.N, freq='min')
except NameError:
self.rng = DatetimeIndex('1/1/2000', periods=self.N, offset=datetools.Minute())
self.date_range = DateRange
self.ts = Series(np.random.randn(self.N), index=self.rng)
def time_replace_fillna(self):
self.ts.fillna(0.0, inplace=True)
class replace_large_dict(object):
goal_time = 0.2
def setup(self):
self.n = (10 ** 6)
self.start_value = (10 ** 5)
self.to_rep = dict(((i, (self.start_value + i)) for i in range(self.n)))
self.s = Series(np.random.randint(self.n, size=(10 ** 3)))
def time_replace_large_dict(self):
self.s.replace(self.to_rep, inplace=True)
class replace_convert(object):
goal_time = 0.5
def setup(self):
self.n = (10 ** 3)
self.to_ts = dict(((i, pd.Timestamp(i)) for i in range(self.n)))
self.to_td = dict(((i, pd.Timedelta(i)) for i in range(self.n)))
self.s = Series(np.random.randint(self.n, size=(10 ** 3)))
self.df = DataFrame({'A': np.random.randint(self.n, size=(10 ** 3)),
'B': np.random.randint(self.n, size=(10 ** 3))})
def time_replace_series_timestamp(self):
self.s.replace(self.to_ts)
def time_replace_series_timedelta(self):
self.s.replace(self.to_td)
def time_replace_frame_timestamp(self):
self.df.replace(self.to_ts)
def time_replace_frame_timedelta(self):
self.df.replace(self.to_td)
class replace_replacena(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
try:
self.rng = date_range('1/1/2000', periods=self.N, freq='min')
except NameError:
self.rng = DatetimeIndex('1/1/2000', periods=self.N, offset=datetools.Minute())
self.date_range = DateRange
self.ts = Series(np.random.randn(self.N), index=self.rng)
def time_replace_replacena(self):
self.ts.replace(np.nan, 0.0, inplace=True)
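# Note: these classes follow the airspeed velocity (asv) convention -- asv calls
# setup() before timing each time_* method and treats goal_time as a rough
# per-benchmark timing budget; they are typically executed with `asv run` from
# the directory containing asv.conf.json (here, asv_bench/).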
|
bsd-3-clause
|
subodhchhabra/airflow
|
docs/conf.py
|
10
|
9098
|
# -*- coding: utf-8 -*-
#
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import mock
MOCK_MODULES = [
'apiclient',
'apiclient.discovery',
'apiclient.http',
'mesos',
'mesos.interface',
'mesos.native',
'google.auth.default',
'google_auth_httplib2',
'google.oauth2.service_account',
'pandas.io.gbq',
'vertica_python',
'pymssql'
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
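# Note: registering Mock objects in sys.modules lets Sphinx autodoc import
# Airflow modules that depend on these optional or platform-specific packages
# without having them installed in the docs build environment.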
# Hack to allow pieces of the code to behave differently while the docs are
# being built. The main objective is to alter the behavior of
# utils.apply_default, which otherwise hides function headers.
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
from airflow import settings
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxarg.ext',
]
autodoc_default_flags = ['show-inheritance', 'members']
viewcode_import = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Airflow'
#copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '1.0.0'
# The full version, including alpha/beta/rc tags.
#release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Airflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Airflow.tex', u'Airflow Documentation',
u'Apache Airflow', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'airflow', u'Airflow Documentation',
[u'Apache Airflow'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
'index', 'Airflow', u'Airflow Documentation',
u'Apache Airflow', 'Airflow',
    'Airflow is a system to programmatically author, schedule and monitor data pipelines.',
'Miscellaneous'
),]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
apache-2.0
|
blaze/dask
|
dask/dataframe/io/utils.py
|
1
|
3139
|
import pandas as pd
import json
from uuid import uuid4
def _get_pyarrow_dtypes(schema, categories):
"""Convert a pyarrow.Schema object to pandas dtype dict"""
# Check for pandas metadata
has_pandas_metadata = schema.metadata is not None and b"pandas" in schema.metadata
if has_pandas_metadata:
pandas_metadata = json.loads(schema.metadata[b"pandas"].decode("utf8"))
pandas_metadata_dtypes = {
c.get("field_name", c.get("name", None)): c["numpy_type"]
for c in pandas_metadata.get("columns", [])
}
tz = {
c.get("field_name", c.get("name", None)): c["metadata"].get(
"timezone", None
)
for c in pandas_metadata.get("columns", [])
if c["metadata"]
}
    else:
        pandas_metadata_dtypes = {}
        # no pandas metadata: no dtype or timezone hints to fall back on
        tz = {}
dtypes = {}
for i in range(len(schema)):
field = schema[i]
# Get numpy_dtype from pandas metadata if available
if field.name in pandas_metadata_dtypes:
if field.name in tz:
numpy_dtype = (
pd.Series([], dtype="M8[ns]").dt.tz_localize(tz[field.name]).dtype
)
else:
numpy_dtype = pandas_metadata_dtypes[field.name]
else:
try:
numpy_dtype = field.type.to_pandas_dtype()
except NotImplementedError:
continue # Skip this field (in case we aren't reading it anyway)
dtypes[field.name] = numpy_dtype
if categories:
for cat in categories:
dtypes[cat] = "category"
return dtypes
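# Hypothetical usage sketch (assumes pyarrow is installed):
#   import pyarrow as pa
#   schema = pa.schema([("a", pa.int64()), ("b", pa.string())])
#   _get_pyarrow_dtypes(schema, categories=["b"])
# would return roughly {"a": np.dtype("int64"), "b": "category"}.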
def _meta_from_dtypes(to_read_columns, file_dtypes, index_cols, column_index_names):
"""Get the final metadata for the dask.dataframe
Parameters
----------
to_read_columns : list
All the columns to end up with, including index names
file_dtypes : dict
Mapping from column name to dtype for every element
of ``to_read_columns``
index_cols : list
Subset of ``to_read_columns`` that should move to the
index
column_index_names : list
The values for df.columns.name for a MultiIndex in the
columns, or df.index.name for a regular Index in the columns
Returns
-------
meta : DataFrame
"""
meta = pd.DataFrame(
{c: pd.Series([], dtype=d) for (c, d) in file_dtypes.items()},
columns=to_read_columns,
)
df = meta[list(to_read_columns)]
if len(column_index_names) == 1:
df.columns.name = column_index_names[0]
if not index_cols:
return df
if not isinstance(index_cols, list):
index_cols = [index_cols]
df = df.set_index(index_cols)
    # XXX: this means we can't roundtrip dataframes where the index name
    # is actually __index_level_0__
if len(index_cols) == 1 and index_cols[0] == "__index_level_0__":
df.index.name = None
if len(column_index_names) > 1:
df.columns.names = column_index_names
return df
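# Hypothetical usage sketch: building an empty meta frame with one index column,
#   _meta_from_dtypes(["idx", "x"], {"idx": "i8", "x": "f8"}, ["idx"], [None])
# returns an empty DataFrame with a float64 column "x" and an empty int64 index
# named "idx".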
def _guid():
"""Simple utility function to get random hex string"""
return uuid4().hex
|
bsd-3-clause
|
xwolf12/scikit-learn
|
examples/ensemble/plot_forest_importances_faces.py
|
403
|
1519
|
"""
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized across multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
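# Note: with scikit-learn/joblib semantics, n_jobs=-1 would instead use all
# available CPU cores.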
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
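# Note: feature_importances_ for scikit-learn forests are impurity-based (mean
# decrease in impurity) values averaged over the trees, one per input feature
# (pixel), which is why they can be reshaped back into the image grid above.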
|
bsd-3-clause
|
nvoron23/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
53
|
49781
|
from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
    # The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, 1 - y2), 1)
assert_equal(hamming_loss(y1, 1 - y1), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
    # in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
|
bsd-3-clause
|
CDTjamie/The_Boids
|
boids/bad_boids.py
|
1
|
5258
|
from matplotlib import pyplot as plt
from matplotlib import animation
import random
import numpy as np
import yaml
# Deliberately terrible code for teaching purposes
config = yaml.load(open("boids/config.yaml"))
boid_number = config["boid_number"]
x_position_limits = config["x_position_limits"]
y_position_limits = config["y_position_limits"]
x_velocity_limits = config["x_velocity_limits"]
y_velocity_limits = config["y_velocity_limits"]
avoid_distance = config["avoid_distance"]
match_speed_distance = config["match_speed_distance"]
middle_scaling = config["middle_scaling"]
match_scaling = config["match_scaling"]
class Flock(object):
def __init__(self, boid_number, x_position_limits, y_position_limits, x_velocity_limits, y_velocity_limits):
self.boid_number = boid_number
self.x_position_limits = x_position_limits
self.y_position_limits = y_position_limits
self.x_velocity_limits = x_velocity_limits
self.y_velocity_limits = y_velocity_limits
def initialise(self, limits, boids):
values = [random.uniform(limits[0], limits[1]) for x in boids]
return values
def new_flock(self):
boids = range(self.boid_number)
x_positions = self.initialise(self.x_position_limits, boids)
y_positions = self.initialise(self.y_position_limits, boids)
x_velocities = self.initialise(self.x_velocity_limits, boids)
y_velocities = self.initialise(self.y_velocity_limits, boids)
boid_positions = (x_positions, y_positions)
boid_velocities = (x_velocities, y_velocities)
return boid_positions, boid_velocities
myflock = Flock(boid_number, x_position_limits, y_position_limits, x_velocity_limits, y_velocity_limits)
boid_positions, boid_velocities = myflock.new_flock()
figure = plt.figure()
axes = plt.axes(xlim=(-500, 1500), ylim=(-500,1500))
scatter = axes.scatter(boid_positions[0], boid_positions[1])
class Flight(object):
def __init__(self, boid_number, boid_positions, boid_velocities, avoid_distance, match_speed_distance, middle_scaling, match_scaling):
self.boid_number = boid_number
self.boid_positions = boid_positions
self.boid_velocities = boid_velocities
self.avoid_distance = avoid_distance
self.match_speed_distance = match_speed_distance
self.middle_scaling = middle_scaling
self.match_scaling = match_scaling
def proximity(self, i, j, boid_positions, boid_velocities, distance):
return (boid_positions[0][j]-boid_positions[0][i])**2 + (boid_positions[1][j]-boid_positions[1][i])**2 < distance
def fly_towards_middle(self, i, j, boid_positions, boid_velocities):
boid_velocities[0][i] = boid_velocities[0][i]+(boid_positions[0][j]-boid_positions[0][i])*self.middle_scaling/self.boid_number
boid_velocities[1][i] = boid_velocities[1][i]+(boid_positions[1][j]-boid_positions[1][i])*self.middle_scaling/self.boid_number
return boid_positions, boid_velocities
def avoid_boids(self, i, j, boid_positions, boid_velocities):
if self.proximity(i,j,boid_positions,boid_velocities,self.avoid_distance):
boid_velocities[0][i] = boid_velocities[0][i]+(boid_positions[0][i]-boid_positions[0][j])
boid_velocities[1][i] = boid_velocities[1][i]+(boid_positions[1][i]-boid_positions[1][j])
return boid_positions, boid_velocities
def match_speed(self, i, j, boid_positions, boid_velocities):
if self.proximity(i,j,boid_positions,boid_velocities,self.match_speed_distance):
boid_velocities[0][i] = boid_velocities[0][i]+(boid_velocities[0][j]-boid_velocities[0][i])*self.match_scaling/self.boid_number
boid_velocities[1][i] = boid_velocities[1][i]+(boid_velocities[1][j]-boid_velocities[1][i])*self.match_scaling/self.boid_number
return boid_positions, boid_velocities
def move(self, boid_positions, boid_velocities, i):
boid_positions[0][i] = boid_positions[0][i]+boid_velocities[0][i]
boid_positions[1][i] = boid_positions[1][i]+boid_velocities[1][i]
return boid_positions
def update_boids(self):
boids = range(self.boid_number)
for i in boids:
for j in boids:
self.fly_towards_middle(i,j,self.boid_positions, self.boid_velocities)
self.avoid_boids(i,j,self.boid_positions, self.boid_velocities)
self.match_speed(i,j,self.boid_positions, self.boid_velocities)
for i in boids:
boid_positions = self.move(self.boid_positions, self.boid_velocities, i)
return boid_positions, boid_velocities
myflight = Flight(boid_number, boid_positions, boid_velocities, avoid_distance, match_speed_distance, middle_scaling, match_scaling)
def animate(frame):
boid_positions, boid_velocities = myflight.update_boids()
x_pos = np.array(boid_positions[0])
y_pos = np.array(boid_positions[1])
data = np.hstack((x_pos[:,np.newaxis], y_pos[:, np.newaxis]))
scatter.set_offsets(data)
anim = animation.FuncAnimation(figure, animate, frames=50, interval=50)
if __name__ =="__main__":
plt.show()
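# Illustrative sketch only, not part of the original bad_boids exercise: the
# three rules above (fly towards the middle, avoid close boids, match speed of
# nearby boids) can also be written with NumPy broadcasting instead of the
# O(N^2) Python loops. The result is not numerically identical to the loop
# version, because the loops update velocities pair by pair, but the rules and
# the config parameters are the same.
def vectorised_update(positions, velocities, middle_scaling, avoid_distance,
                      match_speed_distance, match_scaling):
    """One update step for all boids; positions and velocities are 2 x N."""
    positions = np.array(positions, dtype=float)
    velocities = np.array(velocities, dtype=float)
    n_boids = positions.shape[1]
    # separations[:, i, j] is the vector from boid i to boid j
    separations = positions[:, np.newaxis, :] - positions[:, :, np.newaxis]
    squared_distances = np.sum(separations ** 2, axis=0)
    # rule 1: fly towards the middle of the flock
    velocities += np.sum(separations, axis=2) * middle_scaling / n_boids
    # rule 2: move away from nearby boids (as in the loop version, squared
    # distances are compared directly with the config thresholds)
    too_close = squared_distances < avoid_distance
    velocities -= np.sum(separations * too_close[np.newaxis, :, :], axis=2)
    # rule 3: match velocity of nearby boids
    nearby = squared_distances < match_speed_distance
    velocity_diffs = velocities[:, np.newaxis, :] - velocities[:, :, np.newaxis]
    velocities += (np.sum(velocity_diffs * nearby[np.newaxis, :, :], axis=2)
                   * match_scaling / n_boids)
    positions += velocities
    return positions, velocities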
|
mit
|
BiaDarkia/scikit-learn
|
sklearn/decomposition/nmf.py
|
3
|
45741
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck
# Mathieu Blondel <[email protected]>
# Tom Dupre la Tour
# License: BSD 3 clause
from __future__ import division, print_function
from math import sqrt
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import safe_min
from ..utils.validation import check_is_fitted, check_non_negative
from ..exceptions import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
EPSILON = np.finfo(np.float32).eps
INTEGER_TYPES = (numbers.Integral, np.integer)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _beta_divergence(X, W, H, beta, square_root=False):
"""Compute the beta-divergence of X and dot(W, H).
Parameters
----------
X : float or array-like, shape (n_samples, n_features)
W : float or dense array-like, shape (n_samples, n_components)
H : float or dense array-like, shape (n_components, n_features)
beta : float, string in {'frobenius', 'kullback-leibler', 'itakura-saito'}
Parameter of the beta-divergence.
If beta == 2, this is half the Frobenius *squared* norm.
If beta == 1, this is the generalized Kullback-Leibler divergence.
If beta == 0, this is the Itakura-Saito divergence.
Else, this is the general beta-divergence.
square_root : boolean, default False
If True, return np.sqrt(2 * res)
For beta == 2, it corresponds to the Frobenius norm.
Returns
-------
res : float
        Beta divergence of X and np.dot(W, H)
"""
beta = _beta_loss_to_float(beta)
# The method can be called with scalars
if not sp.issparse(X):
X = np.atleast_2d(X)
W = np.atleast_2d(W)
H = np.atleast_2d(H)
# Frobenius norm
if beta == 2:
# Avoid the creation of the dense np.dot(W, H) if X is sparse.
if sp.issparse(X):
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
res = (norm_X + norm_WH - 2. * cross_prod) / 2.
else:
res = squared_norm(X - np.dot(W, H)) / 2.
if square_root:
return np.sqrt(res * 2)
else:
return res
if sp.issparse(X):
# compute np.dot(W, H) only where X is nonzero
WH_data = _special_sparse_dot(W, H, X).data
X_data = X.data
else:
WH = np.dot(W, H)
WH_data = WH.ravel()
X_data = X.ravel()
# do not affect the zeros: here 0 ** (-1) = 0 and not infinity
indices = X_data > EPSILON
WH_data = WH_data[indices]
X_data = X_data[indices]
# used to avoid division by zero
WH_data[WH_data == 0] = EPSILON
# generalized Kullback-Leibler divergence
if beta == 1:
# fast and memory efficient computation of np.sum(np.dot(W, H))
sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
# computes np.sum(X * log(X / WH)) only where X is nonzero
div = X_data / WH_data
res = np.dot(X_data, np.log(div))
# add full np.sum(np.dot(W, H)) - np.sum(X)
res += sum_WH - X_data.sum()
# Itakura-Saito divergence
elif beta == 0:
div = X_data / WH_data
res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div))
# beta-divergence, beta not in (0, 1, 2)
else:
if sp.issparse(X):
# slow loop, but memory efficient computation of :
# np.sum(np.dot(W, H) ** beta)
sum_WH_beta = 0
for i in range(X.shape[1]):
sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta)
else:
sum_WH_beta = np.sum(WH ** beta)
sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))
res = (X_data ** beta).sum() - beta * sum_X_WH
res += sum_WH_beta * (beta - 1)
res /= beta * (beta - 1)
if square_root:
return np.sqrt(2 * res)
else:
return res
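# Illustrative check (hypothetical demo, not called by the module): for
# beta == 2 the beta-divergence above reduces to half the squared Frobenius
# norm of X - WH, and square_root=True turns it into the plain Frobenius norm.
def _demo_beta_divergence_frobenius():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(4, 5))
    W = np.abs(rng.randn(4, 2))
    H = np.abs(rng.randn(2, 5))
    expected = 0.5 * np.sum((X - np.dot(W, H)) ** 2)
    assert np.allclose(_beta_divergence(X, W, H, beta=2), expected)
    assert np.allclose(_beta_divergence(X, W, H, beta=2, square_root=True),
                       np.sqrt(2 * expected))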
def _special_sparse_dot(W, H, X):
"""Computes np.dot(W, H), only where X is non zero."""
if sp.issparse(X):
ii, jj = X.nonzero()
dot_vals = np.multiply(W[ii, :], H.T[jj, :]).sum(axis=1)
WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)
return WH.tocsr()
else:
return np.dot(W, H)
def _compute_regularization(alpha, l1_ratio, regularization):
"""Compute L1 and L2 regularization coefficients for W and H"""
alpha_H = 0.
alpha_W = 0.
if regularization in ('both', 'components'):
alpha_H = float(alpha)
if regularization in ('both', 'transformation'):
alpha_W = float(alpha)
l1_reg_W = alpha_W * l1_ratio
l1_reg_H = alpha_H * l1_ratio
l2_reg_W = alpha_W * (1. - l1_ratio)
l2_reg_H = alpha_H * (1. - l1_ratio)
return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H
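# Illustrative example (hypothetical demo, not called by the module): with
# alpha=1.0 and l1_ratio=0.3, regularization='both' gives an L1 weight of 0.3
# and an L2 weight of 0.7 on both W and H, while 'components' leaves the W
# penalties at zero.
def _demo_compute_regularization():
    assert np.allclose(_compute_regularization(1.0, 0.3, 'both'),
                       [0.3, 0.3, 0.7, 0.7])
    assert np.allclose(_compute_regularization(1.0, 0.3, 'components'),
                       [0.0, 0.3, 0.0, 0.7])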
def _check_string_param(solver, regularization, beta_loss, init):
allowed_solver = ('cd', 'mu')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
allowed_regularization = ('both', 'components', 'transformation', None)
if regularization not in allowed_regularization:
raise ValueError(
'Invalid regularization parameter: got %r instead of one of %r' %
(regularization, allowed_regularization))
# 'mu' is the only solver that handles other beta losses than 'frobenius'
if solver != 'mu' and beta_loss not in (2, 'frobenius'):
raise ValueError(
'Invalid beta_loss parameter: solver %r does not handle beta_loss'
' = %r' % (solver, beta_loss))
if solver == 'mu' and init == 'nndsvd':
warnings.warn("The multiplicative update ('mu') solver cannot update "
"zeros present in the initialization, and so leads to "
"poorer results when used jointly with init='nndsvd'. "
"You may try init='nndsvda' or init='nndsvdar' instead.",
UserWarning)
beta_loss = _beta_loss_to_float(beta_loss)
return beta_loss
def _beta_loss_to_float(beta_loss):
"""Convert string beta_loss to float"""
allowed_beta_loss = {'frobenius': 2,
'kullback-leibler': 1,
'itakura-saito': 0}
if isinstance(beta_loss, str) and beta_loss in allowed_beta_loss:
beta_loss = allowed_beta_loss[beta_loss]
if not isinstance(beta_loss, numbers.Number):
raise ValueError('Invalid beta_loss parameter: got %r instead '
'of one of %r, or a float.' %
(beta_loss, allowed_beta_loss.keys()))
return beta_loss
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
eps : float
        Truncate all values less than this in output to zero.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``init`` == 'nndsvdar' or 'random'.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
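# Illustrative sketch (hypothetical demo, not called by the module): every
# initialization scheme returns non-negative factors of shapes
# (n_samples, n_components) and (n_components, n_features); the NNDSVD
# variants start from a truncated SVD of X instead of pure noise.
def _demo_initialize_nmf():
    rng = np.random.RandomState(42)
    X = np.abs(rng.randn(6, 5))
    for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
        W, H = _initialize_nmf(X, n_components=3, init=init, random_state=0)
        assert W.shape == (6, 3) and H.shape == (3, 5)
        assert np.min(W) >= 0 and np.min(H) >= 0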
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = np.dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L2 regularization corresponds to increase of the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
# L1 regularization corresponds to decrease of each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, l1_reg_W=0,
l1_reg_H=0, l2_reg_W=0, l2_reg_H=0, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
l1_reg_W : double, default: 0.
L1 regularization parameter for W.
l1_reg_H : double, default: 0.
L1 regularization parameter for H.
l2_reg_W : double, default: 0.
L2 regularization parameter for W.
l2_reg_H : double, default: 0.
L2 regularization parameter for H.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and Phan, Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, l1_reg_W,
l2_reg_W, shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, l1_reg_H,
l2_reg_H, shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
def _multiplicative_update_w(X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,
H_sum=None, HHt=None, XHt=None, update_H=True):
"""update W in Multiplicative Update NMF"""
if beta_loss == 2:
# Numerator
if XHt is None:
XHt = safe_sparse_dot(X, H.T)
if update_H:
# avoid a copy of XHt, which will be re-computed (update_H=True)
numerator = XHt
else:
# preserve the XHt, which is not re-computed (update_H=False)
numerator = XHt.copy()
# Denominator
if HHt is None:
HHt = np.dot(H, H.T)
denominator = np.dot(W, HHt)
else:
# Numerator
# if X is sparse, compute WH only where X is non zero
WH_safe_X = _special_sparse_dot(W, H, X)
if sp.issparse(X):
WH_safe_X_data = WH_safe_X.data
X_data = X.data
else:
WH_safe_X_data = WH_safe_X
X_data = X
# copy used in the Denominator
WH = WH_safe_X.copy()
if beta_loss - 1. < 0:
WH[WH == 0] = EPSILON
# to avoid taking a negative power of zero
if beta_loss - 2. < 0:
WH_safe_X_data[WH_safe_X_data == 0] = EPSILON
if beta_loss == 1:
np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
elif beta_loss == 0:
# speeds up computation time
            # refer to https://github.com/numpy/numpy/issues/9363
WH_safe_X_data **= -1
WH_safe_X_data **= 2
# element-wise multiplication
WH_safe_X_data *= X_data
else:
WH_safe_X_data **= beta_loss - 2
# element-wise multiplication
WH_safe_X_data *= X_data
# here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T)
numerator = safe_sparse_dot(WH_safe_X, H.T)
# Denominator
if beta_loss == 1:
if H_sum is None:
H_sum = np.sum(H, axis=1) # shape(n_components, )
denominator = H_sum[np.newaxis, :]
else:
            # computation of WHHt = dot(dot(W, H) ** (beta_loss - 1), H.T)
if sp.issparse(X):
# memory efficient computation
# (compute row by row, avoiding the dense matrix WH)
WHHt = np.empty(W.shape)
for i in range(X.shape[0]):
WHi = np.dot(W[i, :], H)
if beta_loss - 1 < 0:
WHi[WHi == 0] = EPSILON
WHi **= beta_loss - 1
WHHt[i, :] = np.dot(WHi, H.T)
else:
WH **= beta_loss - 1
WHHt = np.dot(WH, H.T)
denominator = WHHt
# Add L1 and L2 regularization
if l1_reg_W > 0:
denominator += l1_reg_W
if l2_reg_W > 0:
denominator = denominator + l2_reg_W * W
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_W = numerator
# gamma is in ]0, 1]
if gamma != 1:
delta_W **= gamma
return delta_W, H_sum, HHt, XHt
def _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma):
"""update H in Multiplicative Update NMF"""
if beta_loss == 2:
numerator = safe_sparse_dot(W.T, X)
denominator = np.dot(np.dot(W.T, W), H)
else:
# Numerator
WH_safe_X = _special_sparse_dot(W, H, X)
if sp.issparse(X):
WH_safe_X_data = WH_safe_X.data
X_data = X.data
else:
WH_safe_X_data = WH_safe_X
X_data = X
# copy used in the Denominator
WH = WH_safe_X.copy()
if beta_loss - 1. < 0:
WH[WH == 0] = EPSILON
# to avoid division by zero
if beta_loss - 2. < 0:
WH_safe_X_data[WH_safe_X_data == 0] = EPSILON
if beta_loss == 1:
np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
elif beta_loss == 0:
# speeds up computation time
            # refer to https://github.com/numpy/numpy/issues/9363
WH_safe_X_data **= -1
WH_safe_X_data **= 2
# element-wise multiplication
WH_safe_X_data *= X_data
else:
WH_safe_X_data **= beta_loss - 2
# element-wise multiplication
WH_safe_X_data *= X_data
# here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X)
numerator = safe_sparse_dot(W.T, WH_safe_X)
# Denominator
if beta_loss == 1:
W_sum = np.sum(W, axis=0) # shape(n_components, )
W_sum[W_sum == 0] = 1.
denominator = W_sum[:, np.newaxis]
# beta_loss not in (1, 2)
else:
            # computation of WtWH = dot(W.T, dot(W, H) ** (beta_loss - 1))
if sp.issparse(X):
# memory efficient computation
# (compute column by column, avoiding the dense matrix WH)
WtWH = np.empty(H.shape)
for i in range(X.shape[1]):
WHi = np.dot(W, H[:, i])
if beta_loss - 1 < 0:
WHi[WHi == 0] = EPSILON
WHi **= beta_loss - 1
WtWH[:, i] = np.dot(W.T, WHi)
else:
WH **= beta_loss - 1
WtWH = np.dot(W.T, WH)
denominator = WtWH
# Add L1 and L2 regularization
if l1_reg_H > 0:
denominator += l1_reg_H
if l2_reg_H > 0:
denominator = denominator + l2_reg_H * H
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_H = numerator
# gamma is in ]0, 1]
if gamma != 1:
delta_H **= gamma
return delta_H
def _fit_multiplicative_update(X, W, H, beta_loss='frobenius',
max_iter=200, tol=1e-4,
l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0,
update_H=True, verbose=0):
"""Compute Non-negative Matrix Factorization with Multiplicative Update
The objective function is _beta_divergence(X, WH) and is minimized with an
alternating minimization of W and H. Each minimization is done with a
Multiplicative Update.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant input matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
beta_loss : float or string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros.
max_iter : integer, default: 200
Number of iterations.
tol : float, default: 1e-4
Tolerance of the stopping condition.
l1_reg_W : double, default: 0.
L1 regularization parameter for W.
l1_reg_H : double, default: 0.
L1 regularization parameter for H.
l2_reg_W : double, default: 0.
L2 regularization parameter for W.
l2_reg_H : double, default: 0.
L2 regularization parameter for H.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
Returns
-------
W : array, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
start_time = time.time()
beta_loss = _beta_loss_to_float(beta_loss)
# gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
if beta_loss < 1:
gamma = 1. / (2. - beta_loss)
elif beta_loss > 2:
gamma = 1. / (beta_loss - 1.)
else:
gamma = 1.
# used for the convergence criterion
error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True)
previous_error = error_at_init
H_sum, HHt, XHt = None, None, None
for n_iter in range(1, max_iter + 1):
# update W
# H_sum, HHt and XHt are saved and reused if not update_H
delta_W, H_sum, HHt, XHt = _multiplicative_update_w(
X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,
H_sum, HHt, XHt, update_H)
W *= delta_W
# necessary for stability with beta_loss < 1
if beta_loss < 1:
W[W < np.finfo(np.float64).eps] = 0.
# update H
if update_H:
delta_H = _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H,
l2_reg_H, gamma)
H *= delta_H
# These values will be recomputed since H changed
H_sum, HHt, XHt = None, None, None
# necessary for stability with beta_loss < 1
if beta_loss <= 1:
H[H < np.finfo(np.float64).eps] = 0.
# test convergence criterion every 10 iterations
if tol > 0 and n_iter % 10 == 0:
error = _beta_divergence(X, W, H, beta_loss, square_root=True)
if verbose:
iter_time = time.time()
print("Epoch %02d reached after %.3f seconds, error: %f" %
(n_iter, iter_time - start_time, error))
if (previous_error - error) / error_at_init < tol:
break
previous_error = error
# do not print if we have already printed in the convergence test
if verbose and (tol == 0 or n_iter % 10 != 0):
end_time = time.time()
print("Epoch %02d reached after %.3f seconds." %
(n_iter, end_time - start_time))
return W, H, n_iter
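# Illustrative sketch (hypothetical demo, not called by the module): starting
# from a random initialization, the multiplicative updates should never
# increase the beta-divergence they minimize; here this is checked for the
# generalized Kullback-Leibler loss on a small random matrix.
def _demo_fit_multiplicative_update():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(10, 8))
    W0, H0 = _initialize_nmf(X, n_components=3, init='random', random_state=0)
    error_init = _beta_divergence(X, W0, H0, beta='kullback-leibler')
    W, H, _ = _fit_multiplicative_update(
        X, W0.copy(), H0.copy(), beta_loss='kullback-leibler',
        max_iter=100, tol=1e-4)
    assert _beta_divergence(X, W, H, beta='kullback-leibler') <= error_init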
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
beta_loss='frobenius', tol=1e-4,
max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False):
r"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
For multiplicative-update ('mu') solver, the Frobenius norm
(0.5 * ||X - WH||_Fro^2) can be changed into another beta-divergence loss,
by changing the beta_loss parameter.
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : 'cd' | 'mu'
Numerical solver to use:
'cd' is a Coordinate Descent solver that uses Fast Hierarchical
Alternating Least Squares (Fast HALS).
'mu' is a Multiplicative Update solver.
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionadded:: 0.19
Multiplicative Update solver.
beta_loss : float or string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros. Used only in 'mu' solver.
.. versionadded:: 0.19
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import non_negative_factorization
>>> W, H, n_iter = non_negative_factorization(X, n_components=2,
... init='random', random_state=0)
References
----------
    Cichocki, Andrzej, and Phan, Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float)
check_non_negative(X, "NMF (input X)")
beta_loss = _check_string_param(solver, regularization, beta_loss, init)
if safe_min(X) == 0 and beta_loss <= 0:
raise ValueError("When beta_loss <= 0 and X contains zeros, "
"the solver may diverge. Please add small values to "
"X, or use a positive beta_loss.")
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, INTEGER_TYPES) or n_components <= 0:
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(max_iter, INTEGER_TYPES) or max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
# 'mu' solver should not be initialized by zeros
if solver == 'mu':
avg = np.sqrt(X.mean() / n_components)
W = avg * np.ones((n_samples, n_components))
else:
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = _compute_regularization(
alpha, l1_ratio, regularization)
if solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol, max_iter,
l1_reg_W, l1_reg_H,
l2_reg_W, l2_reg_H,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
elif solver == 'mu':
W, H, n_iter = _fit_multiplicative_update(X, W, H, beta_loss, max_iter,
tol, l1_reg_W, l1_reg_H,
l2_reg_W, l2_reg_H, update_H,
verbose)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter and tol > 0:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
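# Illustrative usage sketch (hypothetical demo, not called by the module):
# passing update_H=False turns the call into a non-negative least squares
# solve for W against a fixed dictionary H, which is exactly how
# NMF.transform below reuses this function.
def _demo_transform_with_fixed_H():
    rng = np.random.RandomState(0)
    X_train = np.abs(rng.randn(12, 6))
    W, H, _ = non_negative_factorization(X_train, n_components=3,
                                         init='random', random_state=0)
    X_new = np.abs(rng.randn(4, 6))
    W_new, H_fixed, _ = non_negative_factorization(
        X_new, H=H, n_components=3, update_H=False, random_state=0)
    assert W_new.shape == (4, 3)
    assert np.allclose(H_fixed, H)  # H is returned unchanged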
class NMF(BaseEstimator, TransformerMixin):
r"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
For multiplicative-update ('mu') solver, the Frobenius norm
(0.5 * ||X - WH||_Fro^2) can be changed into another beta-divergence loss,
by changing the beta_loss parameter.
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'cd' | 'mu'
Numerical solver to use:
'cd' is a Coordinate Descent solver.
'mu' is a Multiplicative Update solver.
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionadded:: 0.19
Multiplicative Update solver.
beta_loss : float or string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros. Used only in 'mu' solver.
.. versionadded:: 0.19
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
verbose : bool, default=False
Whether to be verbose.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
Attributes
----------
components_ : array, [n_components, n_features]
Factorization matrix, sometimes called 'dictionary'.
reconstruction_err_ : number
Frobenius norm of the matrix difference, or beta-divergence, between
the training data ``X`` and the reconstructed data ``WH`` from
the fitted model.
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> W = model.fit_transform(X)
>>> H = model.components_
References
----------
    Cichocki, Andrzej, and Phan, Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
def __init__(self, n_components=None, init=None, solver='cd',
beta_loss='frobenius', tol=1e-4, max_iter=200,
random_state=None, alpha=0., l1_ratio=0., verbose=0,
shuffle=False):
self.n_components = n_components
self.init = init
self.solver = solver
self.beta_loss = beta_loss
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
y : Ignored
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Returns
-------
W : array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float)
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components, init=self.init,
update_H=True, solver=self.solver, beta_loss=self.beta_loss,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle)
self.reconstruction_err_ = _beta_divergence(X, W, H, self.beta_loss,
square_root=True)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
y : Ignored
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Returns
-------
W : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
beta_loss=self.beta_loss, tol=self.tol, max_iter=self.max_iter,
alpha=self.alpha, l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle)
return W
def inverse_transform(self, W):
"""Transform data back to its original space.
Parameters
----------
W : {array-like, sparse matrix}, shape (n_samples, n_components)
Transformed data matrix
Returns
-------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix of original shape
.. versionadded:: 0.18
"""
check_is_fitted(self, 'n_components_')
return np.dot(W, self.components_)
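# Illustrative end-to-end sketch (hypothetical demo, not part of the module):
# fit a small NMF model, reconstruct the input with inverse_transform, and
# note that reconstruction_err_ is the Frobenius norm of the residual.
def _demo_nmf_reconstruction():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(10, 6))
    model = NMF(n_components=3, init='random', random_state=0)
    W = model.fit_transform(X)
    X_hat = model.inverse_transform(W)
    assert X_hat.shape == X.shape
    assert np.allclose(model.reconstruction_err_,
                       np.linalg.norm(X - X_hat, 'fro'))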
|
bsd-3-clause
|
roshantha9/AbstractManycoreSim
|
src/libApplicationModel/HEVCSyntheticWorkloadValidator_fast.py
|
1
|
26004
|
import pprint
import argparse
import sys, os
import random
import time
import math
import gc
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
#plt.style.use('ggplot')
from collections import OrderedDict
from scipy.stats import rv_discrete
from scipy.stats import exponweib
import scipy.stats as ss
import itertools
import json
import csv
from operator import itemgetter
import networkx as nx
import operator
from collections import Counter, Sequence
import multiprocessing
import simpy
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
## local imports
from SimParams import SimParams
from AdaptiveGoPGenerator import AdaptiveGoPGenerator
from AdaptiveGoPGenerator_v2 import AdaptiveGoPGenerator_v2
# pregen data files
from libApplicationModel.DataPreloader import DataPreloader
import libApplicationModel.HEVCWorkloadParams as HEVCWLP
from libApplicationModel.HEVCFrameTask import HEVCFrameTask
from libNoCModel.NoCFlow import NoCFlow
#EXP_DATADIR = "../experiment_data/workload_validation_fast_cc_scaledown/"
EXP_DATADIR = "../experiment_data/workload_validation_fast_cucc_scale_06_01/"
RANDOM_SEEDS = [81665, 33749, 43894, 26358, 80505, 83660, 22817, 70263, 29917, 26044]
#RANDOM_SEEDS = [81665, 33749, 43894, 26358, 80505]
#RANDOM_SEEDS = [80505]
NOC_W = 8
NOC_H = 8
NOC_PERIOD = 0.00000001
NOC_ARBITRATION_COST = 7.0 * NOC_PERIOD
# testbench class for hevc frame
class HEVCSyntheticWorkloadValidator():
def __init__(self, seed, num_gops, vid_res, movie_type):
self.env = simpy.Environment()
self.seed = seed
self.num_gops = num_gops
self.vid_res = vid_res
self.movie_type = movie_type
self.generated_gops_list = []
def _write_formatted_file(self, fname, data, format):
if(format == "pretty"):
logfile=open(fname, 'w')
            pprint.pprint(data, logfile, width=128)
elif(format == "json"):
logfile=open(fname, 'w')
json_data = json.dumps(data)
logfile.write(json_data)
else:
logfile=open(fname, 'w')
            pprint.pprint(data, logfile, width=128)
def generateMultipleGoPs(self, nBmax, N):
print "generateMultipleGoPs:: SEED === " + str(self.seed)
random.seed(self.seed)
np.random.seed(self.seed)
task_start_id = 0
for gid in xrange(self.num_gops):
GoP_obj = HEVCGoP_TB(self.env, nBmax, N,
task_start_id, gid, self.vid_res,
self.movie_type,
)
GoP_obj.generateHEVCGoP()
self.generated_gops_list.append(GoP_obj)
print "finished : ", gid
task_start_id += N
def dump_statistics(self, gop_fname, fr_fname):
gop_data_fname = gop_fname
frame_data_fname = fr_fname
gop_data = {
'contig_bframes' : [],
'numP' : [],
'numB' : [],
'refdist' : []
}
# get gop data
for each_gop in self.generated_gops_list:
gop_data['contig_bframes'].extend(each_gop.num_contiguous_b_frames)
gop_data['numP'].append(each_gop.num_P)
gop_data['numB'].append(each_gop.num_B)
gop_data['refdist'].extend(each_gop.ref_fr_dist)
# get frame level data
frame_data = {
'trackvalidate_prop_cu_sizes' : {
"intra" : {64:0, 32:0, 16:0, 8:0, 4:0},
"inter" : {64:0, 32:0, 16:0, 8:0, 4:0}
},
'trackvalidate_prop_cu_types' : {
"I" : {"ICU":0, "PCU":0, "BCU":0, "SkipCU":0},
"P" : {"ICU":0, "PCU":0, "BCU":0, "SkipCU":0},
"B" : {"ICU":0, "PCU":0, "BCU":0, "SkipCU":0},
},
'trackvalidate_cu_dectime' : {"ICU_cc":[], "PCU_cc":[], "BCU_cc":[], "SkipCU_cc":[]},
'trackvalidate_reffrdata' : {
"P<-I" : [],
"P<-P" : [],
"B<-I" : [],
"B<-P" : [],
"B<-B" : []
},
'trackvalidate_frdectime' : {
"I" : [], "P" : [], "B" : []
},
'trackvalidate_frencsize' : {
"I" : [], "P" : [], "B" : []
}
}
# frame-level, data
for each_gop in self.generated_gops_list:
for each_frame in each_gop.hevc_frames:
ftype = each_frame.get_frameType()
# trackvalidate_prop_cu_sizes
if ftype in ["I"]:
for k,v in each_frame.trackvalidate_prop_cu_sizes.iteritems():
frame_data['trackvalidate_prop_cu_sizes']['intra'][k]+=v
elif ftype in ["P", "B"]:
for k,v in each_frame.trackvalidate_prop_cu_sizes.iteritems():
frame_data['trackvalidate_prop_cu_sizes']['inter'][k]+=v
else:
sys.exit("unknown ftype = " + str(ftype))
# trackvalidate_prop_cu_types
#pprint.pprint(each_frame.trackvalidate_prop_cu_types)
for k,v in each_frame.trackvalidate_prop_cu_types.iteritems():
frame_data['trackvalidate_prop_cu_types'][ftype][k]+=v
# trackvalidate_cu_dectime
for k,v in each_frame.trackvalidate_cu_dectime.iteritems():
frame_data['trackvalidate_cu_dectime'][k].extend(v)
# trackvalidate_reffrdata
for each_parent_node, payload in each_frame.trackvalidate_reffrdata.iteritems():
parent_ftype = each_parent_node[0]
refdata_dir_k = "%s<-%s" % (ftype, parent_ftype)
frame_data['trackvalidate_reffrdata'][refdata_dir_k].append(payload)
# frame size (encoded) and decoding time
if ftype == "I":
frame_data['trackvalidate_frencsize']["I"].append(each_frame.get_mpeg_tasksize())
frame_data['trackvalidate_frdectime']["I"].append(each_frame.get_computationCost())
elif ftype == "P":
frame_data['trackvalidate_frencsize']["P"].append(each_frame.get_mpeg_tasksize())
frame_data['trackvalidate_frdectime']["P"].append(each_frame.get_computationCost())
elif ftype == "B":
frame_data['trackvalidate_frencsize']["B"].append(each_frame.get_mpeg_tasksize())
frame_data['trackvalidate_frdectime']["B"].append(each_frame.get_computationCost())
else:
sys.exit("frsize- unknown ftype = " + str(ftype))
# debug
#if ftype == "B":
# print ftype, each_frame.get_frameIXinGOP(), each_frame.trackvalidate_reffrdata
# write out (gop data)
logfile=open(gop_data_fname, 'w')
json_data = json.dumps(gop_data)
logfile.write(json_data)
# write out (frame data)
logfile=open(frame_data_fname, 'w')
json_data = json.dumps(frame_data)
logfile.write(json_data)
def dump_video_workload_stats(self, vidworkload_data_fname):
vid_wkl_stats = {
# gop-level stats
'gop_info': [],
# worst-case TG stats
'max_num_nodes' : None,
'max_num_edges' : None,
'fr_cc_I' : [],
'fr_cc_P' : [],
'fr_cc_B' : [],
'fr_ref_data_payloads' : [],
'fr_enc_sizes' : [],
'wc_comp_cost_I' : None,
'wc_comp_cost_P' : None,
'wc_comp_cost_B' : None,
}
# frame-level, data
all_gop_stats = []
for gix, each_gop in enumerate(self.generated_gops_list):
gop_stats = {
# high level stats
'gop_ix' : gix,
'gop_sequence' : each_gop.gop_seq,
'numP' : each_gop.num_P,
'numB' : each_gop.num_B,
'num_edges' : each_gop.num_edges,
'num_nodes' : each_gop.num_nodes,
'real_ccr' : None
}
all_ref_data_payloads = []
all_fr_ccs = []
for each_frame in each_gop.hevc_frames:
ftype = each_frame.get_frameType()
# computation costs
if ftype == "I":
vid_wkl_stats['fr_cc_I'].append(each_frame.get_computationCost())
elif ftype == "P":
vid_wkl_stats['fr_cc_P'].append(each_frame.get_computationCost())
elif ftype == "B":
vid_wkl_stats['fr_cc_B'].append(each_frame.get_computationCost())
else:
sys.exit("frsize- unknown ftype = " + str(ftype))
all_fr_ccs.append(each_frame.get_computationCost())
# communication payload
for each_parent_node, payload in each_frame.trackvalidate_reffrdata.iteritems():
vid_wkl_stats['fr_ref_data_payloads'].append(payload)
all_ref_data_payloads.append(payload)
# encoded fr size
vid_wkl_stats['fr_enc_sizes'].append(each_frame.get_mpeg_tasksize())
# add gop stats to video stats
vid_wkl_stats['gop_info'].append(gop_stats)
# calculate gop ccr
gop_stats['real_ccr'] = self._get_ccr(all_ref_data_payloads, all_fr_ccs, each_gop.num_nodes, each_gop.num_edges)
# calculate worst-case stats (video-level)
vid_wkl_stats['max_num_nodes'] = np.max([ g['num_nodes'] for g in vid_wkl_stats['gop_info'] ])
vid_wkl_stats['max_num_edges'] = np.max([ g['num_edges'] for g in vid_wkl_stats['gop_info'] ])
vid_wkl_stats['wc_comp_cost_I'] = np.max(vid_wkl_stats['fr_cc_I'])
vid_wkl_stats['wc_comp_cost_P'] = np.max(vid_wkl_stats['fr_cc_P'])
vid_wkl_stats['wc_comp_cost_B'] = np.max(vid_wkl_stats['fr_cc_B'])
# write out (frame data)
logfile=open(vidworkload_data_fname, 'w')
json_data = json.dumps(vid_wkl_stats)
logfile.write(json_data)
def _get_ccr(self, payloads, fr_ccs, num_nodes, num_edges ):
# checks
assert(len(payloads) == num_edges)
assert(len(fr_ccs) == num_nodes)
total_nodes_cost = np.sum(fr_ccs)
total_edges_cost = 0
nhops= (NOC_H-1) + (NOC_W-1)
for each_flw_payload in payloads:
total_edges_cost += NoCFlow.getCommunicationCost(each_flw_payload, nhops, NOC_PERIOD, NOC_ARBITRATION_COST)
ratio_ccr = float(total_edges_cost)/float(total_nodes_cost)
return ratio_ccr
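    # Illustrative helper (hypothetical, not used by the validator): the CCR
    # returned by _get_ccr is the total NoC communication cost of the
    # reference-data flows divided by the total frame decoding cost, with
    # every flow assumed to cross the worst-case hop count of the mesh.
    def _demo_ccr(self):
        dummy_fr_ccs = [0.02, 0.03, 0.01]  # dummy frame decoding costs (s)
        dummy_payloads = [80000, 120000]   # dummy ref-data payloads (bytes)
        return self._get_ccr(dummy_payloads, dummy_fr_ccs,
                             num_nodes=3, num_edges=2)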
# testbench class for hevc gop
class HEVCGoP_TB():
def __init__(self, env, nBmax, N,
task_start_id,gid, vid_res,
movieType = 'DOC',
):
# gop-level stats
self.env = env
self.movieType = movieType
self.N = N
self.nBmax = nBmax
self.task_start_id = task_start_id
self.gop_id = gid
self.vid_res = vid_res
# to calculate
self.num_contiguous_b_frames = [] # different numbers of contiguous frames
self.num_P = None
self.num_B = None
self.ref_fr_dist = [] # different numbers of ref dist
# frames in gop
self.gop_seq = None
self.hevc_frames = []
self.num_edges = None
self.num_nodes = None
def calculateStats(self):
# contig b-frames
self.num_contiguous_b_frames = []
splited_gop = self.gop_seq[1:].split("P")
for each_sp_gop in splited_gop:
if len(each_sp_gop) > 0:
self.num_contiguous_b_frames.append(len(each_sp_gop))
# P/B counts
self.num_P = self.gop_seq.count("P")
self.num_B = self.gop_seq.count("B")
def generateHEVCGoP(self):
gop_size_N = self.N
nBmax = self.nBmax
movie_type = self.movieType
task_start_id = self.task_start_id
gop_id = self.gop_id
unique_gop_start_id = gop_id
frame_h = self.vid_res[0]
frame_w = self.vid_res[1]
priority_range = range(1,100) # dummy
strm_resolution = frame_h*frame_w
# n_frame_tile_rows = random.choice(range(2,SimParams.HEVC_PICTURE_SPECIFIC_LIMITS[strm_resolution]['max_tiles_rows'],1))
# n_frame_tile_cols = random.choice(range(2,SimParams.HEVC_PICTURE_SPECIFIC_LIMITS[strm_resolution]['max_tiles_cols'],1))
# n_frame_slices = n_frame_tile_rows*n_frame_tile_cols
n_frame_slices = 1
ctu_size = (SimParams.HEVC_CTU_SIZE)
#n_frame_tiles = n_frame_tile_rows*n_frame_tile_cols
n_frame_tiles = 1
total_num_ctus = int(float(strm_resolution)/float(ctu_size))
#n_ctus_per_slice = self._get_num_ctus_per_slice(n_frame_slices, total_num_ctus)
n_ctus_per_slice = [total_num_ctus]
#frame_slice_info = self.calc_hevc_slices_constantslicetype(n_frame_slices)
frame_slice_info = tmp_fr_slice_info = {
"I" : {'interleaved_slice_types' : ['Is']},
"P" : {'interleaved_slice_types' : ['Ps']},
"B" : {'interleaved_slice_types' : ['Bs']},
}
# only when using probabilistic gop model
gopprobmodel_fixed_GoPSize_N = gop_size_N
gopprobmodel_fixed_nBMax = nBmax
gopprobmodel_fixed_movietype = movie_type # @UndefinedVariable
gop_instance_frames = [] # reset
unique_gop_id = gop_id
pfr_num_refs = 1
bfr_num_refs = [1,2]
# generate a gop structure
AGG = AdaptiveGoPGenerator_v2(gopprobmodel_fixed_nBMax, gopprobmodel_fixed_GoPSize_N,
SimParams.HEVC_ADAPTIVEGOPGEN_PARAM_PFRAME_REFS,
SimParams.HEVC_ADAPTIVEGOPGEN_PARAM_BFRAME_REFS,
movieType=gopprobmodel_fixed_movietype
)
AGG.verifyAndRecreateGoP() # this will create a valid gop (brute-force checking)
# calculate reference distance of nodes
ref_distance_list = []
for each_node in AGG.get_networkxDG().nodes():
if "B" in each_node:
for each_parent in AGG.get_networkxDG().predecessors(each_node):
ref_dist = np.abs(AdaptiveGoPGenerator_v2.nxLabel2Frame(each_node)['fix'] - AdaptiveGoPGenerator_v2.nxLabel2Frame(each_parent)['fix'])
ref_distance_list.append(ref_dist)
if ref_dist > 3: # checking
print each_node, each_parent, AGG.get_gopSeq()
sys.exit()
else:
pass
self.ref_fr_dist = ref_distance_list
gop_sequence = AGG.get_gopSeq()
self.gop_seq = gop_sequence
gop_frame_dec_order = AGG.getDecodingOrder()
print gop_sequence, len(gop_sequence), n_frame_tiles
# get num edges + nodes
self.num_edges = AGG.getTotalEdges()
self.num_nodes = AGG.getTotalNodes()
# construct task ids
gop_task_ids = range(task_start_id, task_start_id+len(gop_sequence))
#print gop_task_ids
AGG.initialiseInternalStructures(gop_task_ids)
for frame_id, each_frame in enumerate(gop_sequence):
frame_task = HEVCFrameTask(env = self.env,
#id = (((each_gop_id) * len(gop_sequence) + frame_id) + task_start_id), \
id = gop_task_ids[frame_id],
frame_type = str(each_frame),
frame_ix_in_gop = frame_id,
unique_gop_id = unique_gop_id,
gop_id = gop_id,
gop_struct = gop_sequence,
video_stream_id = 0,
wf_id = 0,
frame_h=frame_h,
frame_w=frame_w,
video_genre = gopprobmodel_fixed_movietype,
priority = priority_range[frame_id],
num_slices_per_frame = n_frame_slices,
num_tiles_per_frame = n_frame_tiles,
interleaved_slice_types = frame_slice_info[str(each_frame)]['interleaved_slice_types'],
num_ctu_per_slice=n_ctus_per_slice,
adaptiveGoP_Obj=AGG,
load_data_from_file = SimParams.HEVC_LOAD_FRAME_DATAFILE,
gop_decode_frame_order = gop_frame_dec_order,
gop_decode_order_ix = gop_frame_dec_order[0].index(frame_id),
enable_workload_validation = True
)
gop_instance_frames.append(frame_task)
assert(n_frame_tiles == frame_task.getNumSubTasksTiles()), \
", generateHEVCFrameTaskSet:: error in tile generation : %d, %d" % (n_frame_tiles, frame_task.getNumSubTasksTiles())
# calculate the data dependency
#task_level_decoded_data_output = self.hevcframelevel_calc_expected_data_to_children(gop_instance_frames, task_level_decoded_data_output)
task_start_id = gop_task_ids[-1]+1
# populate the data deps for each task in task list
#self.populate_expected_data_to_children(task_level_decoded_data_output)
# see if we can flush the frame block-level info
self.hevc_flush_frame_block_level_data()
# save the generated frames
self.hevc_frames = gop_instance_frames
# calculate some gop level stats
self.calculateStats()
# for each task, we delete the frame block level info.
def hevc_flush_frame_block_level_data(self):
if SimParams.HEVC_MODEL_FLUSH_FRAMEBLOCK_INFO == True:
for each_task in self.hevc_frames:
#print "BEFORE : hevc_flush_frame_block_level_data : blk_len, tid: ", len(each_task.frame_block_partitions.keys()), each_task.get_id()
each_task.hack_abstract__frame_block_partitions()
#print "AFTER : hevc_flush_frame_block_level_data : blk_len, tid: ", len(each_task.frame_block_partitions.keys()), each_task.get_id()
else:
pass
collected = gc.collect()
print "Garbage collector: collected %d objects." % (collected)
def multiprocessing_job_instance(each_res, each_gop_len, each_mov_type):
cc_scale_down = 0.1 # this makes the CCR go High
HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['ICU'] = (HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['ICU'][0]*float(cc_scale_down), HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['ICU'][1]*float(cc_scale_down))
HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['PCU'] = (HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['PCU'][0]*float(cc_scale_down), HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['PCU'][1]*float(cc_scale_down))
HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['BCU'] = (HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['BCU'][0]*float(cc_scale_down), HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['BCU'][1]*float(cc_scale_down))
HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['SkipCU'] = (HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['SkipCU'][0]*float(cc_scale_down), HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['SkipCU'][1]*float(cc_scale_down))
for each_seed in RANDOM_SEEDS:
print "==============================================="
print "seed: ", each_seed
print "res: ", each_res
print "gop_len: ", each_gop_len
print "mov_type: ", each_mov_type
print "==============================================="
random.seed(each_seed)
np.random.seed(each_seed)
# dump fnames
res_str = str(each_res[0]) + "x" + str(each_res[1])
fname_prefix = "__s%d_G%d_N%d_B%d_%s" % (each_seed, args.num_gops, each_gop_len, args.nbmax, each_mov_type)
vidworkload_data_fname = EXP_DATADIR + "vidworkload_data_res" + res_str + "_" + fname_prefix + ".js"
HSWV = HEVCSyntheticWorkloadValidator(seed=each_seed, num_gops=args.num_gops, vid_res=each_res, movie_type=each_mov_type)
HSWV.generateMultipleGoPs(args.nbmax, each_gop_len)
HSWV.dump_video_workload_stats(vidworkload_data_fname)
print ""
print "-- FINISHED --"
print "==============================================="
#################################################
# MAIN - start the generation and validation
#################################################
parser = argparse.ArgumentParser(__file__, description="Generate HEVC GoP/frame workloads")
parser.add_argument("--seed", "-w", help="seed", type=int, default=1234)
parser.add_argument("--num_gops", help="num of gops", type=int, default=8)
parser.add_argument("--gop_len", help="gop length", type=int, default=26)
parser.add_argument("--nbmax", help="max contig. B fr", type=int, default=4)
args = parser.parse_args()
#DataPreloader.preload_data_files(fname="../hevc_pregen_data_files/pregen_pus/hevc_probmodel_generate_PU_per_CTU_200ctus.p")
#DataPreloader.preload_data_files(fname="../hevc_pregen_data_files/pregen_pus/hevc_probmodel_generate_PU_per_CTU_10ctus.p")
jobs = []
#######################
# temp fixed settings #
# args.seed = 1234
# args.num_gops = 3
# args.gop_len = 26
# args.nbmax = 4
# vid_resolution_list = [(1280,720)
# ]
#movie_type = "ANIM"
vid_resolution_list = [(3840,2160),(2560,1440),
(1920,1080),(1280,720),
(854,480),(640,360),
(512,288),
]
mov_list = [ 'ACTION', 'DOC', 'SPORT', 'SPEECH', 'ANIM' ]
gop_len_list = [16, 31]
#######################
#pprint.pprint(HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR)
# print "args.seed :" + str(args.seed)
# print "args.num_gops :" + str(args.num_gops)
# print "args.gop_len :" + str(args.gop_len)
# print "args.nbmax :" + str(args.nbmax)
# print "vid_res :" + str(vid_resolution_list)
# print "movie_type :" + movie_type
# create and dump
jobs = []
for each_res in vid_resolution_list:
for each_gop_len in gop_len_list:
for each_mov_type in mov_list:
# create a worker process per (resolution, gop_len, movie_type) combination
p = multiprocessing.Process(target=multiprocessing_job_instance, args=(each_res, each_gop_len, each_mov_type, ))
jobs.append(p)
#p.start()
# start the processes
for p in jobs:
p.start()
# exit the completed processes
for p in jobs:
p.join()
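# Illustrative invocation (the script filename below is a placeholder; the
# argument names are the ones defined by the parser above):
#   python generate_hevc_workloads.py --seed 1234 --num_gops 8 --gop_len 26 --nbmax 4
# One worker process is spawned per (resolution, gop_len, movie_type)
# combination, and each dumps its workload statistics under EXP_DATADIR.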
|
gpl-3.0
|
Lawrence-Liu/scikit-learn
|
sklearn/ensemble/tests/test_forest.py
|
57
|
35265
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, X, y):
# Check variable importances.
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
# thresholding should select a non-empty, strict subset of the features
assert_greater(X_new.shape[1], 0)
assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importances, name, X, y
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=40)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
# Test class_weight works for multi-output.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert():
classifier = RandomForestClassifier()
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
|
bsd-3-clause
|
RachitKansal/scikit-learn
|
sklearn/metrics/classification.py
|
95
|
67713
|
"""Metrics to assess performance on classification task given classe prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value in y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
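# Illustrative sketch of the helper's behaviour (assumed, not a doctest):
# for plain binary integer labels it reports the common type and returns the
# inputs as 1d arrays,
#   _check_targets([0, 1, 1, 0], [0, 1, 0, 0])
#   -> ('binary', array([0, 1, 1, 0]), array([0, 1, 0, 0]))
# while mixing a multilabel indicator matrix with a 1d multiclass vector
# raises ValueError("Can't handle mix of ...") as implemented above.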
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
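# Worked example (illustrative):
#   sample_score  = np.array([1, 0, 1, 1])   # per-sample 0/1 correctness
#   sample_weight = np.array([1, 1, 2, 2])
#   _weighted_sum(sample_score, sample_weight)                  -> 5   (weighted count)
#   _weighted_sum(sample_score, sample_weight, normalize=True)  -> 5/6 ~ 0.833
#   _weighted_sum(sample_score, None)                           -> 3   (plain sum)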
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
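# Worked example (illustrative):
#   y1 = [0, 0, 1, 1]; y2 = [0, 1, 1, 1]
#   confusion = [[1, 1], [0, 2]]  ->  P = confusion / 4.
#   p_observed = trace(P) = 0.75
#   p_expected = dot(P.sum(axis=0), P.sum(axis=1)) = 0.25*0.5 + 0.75*0.5 = 0.5
#   kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5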
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, the ratio is NaN instead; we set
# the Jaccard score to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
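# --- Illustrative sketch (not part of the original module) ------------------
# The zero-division handling in _prf_divide above can be reproduced with
# plain NumPy: divide under errstate, then zero out the undefined entries.
# A minimal sketch, assuming only NumPy:
#
#   >>> import numpy as np
#   >>> num, den = np.array([2., 0.]), np.array([4., 0.])
#   >>> with np.errstate(divide='ignore', invalid='ignore'):
#   ...     result = num / den
#   >>> result[den == 0.0] = 0.0   # same masking as _prf_divide
#   >>> result                     # -> array([0.5, 0.])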
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
       <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
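# --- Illustrative sketch (not part of the original module) ------------------
# The sufficient statistics used above (tp_sum, pred_sum, true_sum) and the
# micro/macro averaging rules can be reproduced by hand on a small multiclass
# problem.  A minimal sketch, assuming only NumPy:
#
#   >>> import numpy as np
#   >>> y_true = np.array([0, 1, 2, 0, 1, 2])
#   >>> y_pred = np.array([0, 2, 1, 0, 0, 1])
#   >>> labels = [0, 1, 2]
#   >>> tp_sum = np.array([((y_true == c) & (y_pred == c)).sum() for c in labels])
#   >>> pred_sum = np.array([(y_pred == c).sum() for c in labels])
#   >>> true_sum = np.array([(y_true == c).sum() for c in labels])
#   >>> tp_sum, pred_sum, true_sum
#   (array([2, 0, 0]), array([3, 2, 1]), array([2, 2, 2]))
#   >>> float(tp_sum.sum()) / pred_sum.sum()        # micro precision -> 0.333...
#   >>> np.mean(tp_sum / pred_sum.astype(float))    # macro precision -> 0.222...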
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
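# --- Illustrative sketch (not part of the original module) ------------------
# The final 'avg / total' row printed by classification_report is simply the
# support-weighted average of the per-class columns.  A minimal sketch using
# the per-class values from the docstring example, assuming only NumPy:
#
#   >>> import numpy as np
#   >>> precision = np.array([0.5, 0.0, 1.0])
#   >>> support = np.array([1, 1, 3])
#   >>> np.average(precision, weights=support)   # -> 0.70, as in the report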
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred``, which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. The Hamming loss is more forgiving in that it penalizes only the
    individual labels that are wrong.
    The Hamming loss is upper-bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
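# --- Illustrative sketch (not part of the original module) ------------------
# The docstring value above can be reproduced directly from the definition
# -(yt*log(yp) + (1 - yt)*log(1 - yp)).  Label order follows LabelBinarizer,
# so 'spam' maps to 1 ('ham' < 'spam').  A minimal sketch, assuming only NumPy:
#
#   >>> import numpy as np
#   >>> yt = np.array([1., 0., 0., 1.])         # true labels, 1 == 'spam'
#   >>> yp = np.array([0.9, 0.1, 0.2, 0.65])    # predicted P(spam)
#   >>> np.mean(-(yt * np.log(yp) + (1 - yt) * np.log(1 - yp)))   # -> 0.21616...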
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
        True target, consisting of integers taking one of two values. The
        positive label must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
    # The hinge loss does not penalize predictions with margin >= 1.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
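# --- Illustrative sketch (not part of the original module) ------------------
# In the binary case, the value from the docstring example follows directly
# from the margin definition above.  A minimal sketch, assuming only NumPy and
# the decision values quoted in the docstring:
#
#   >>> import numpy as np
#   >>> y_true = np.array([-1., 1., 1.])
#   >>> pred_decision = np.array([-2.18, 2.36, 0.09])
#   >>> losses = 1 - y_true * pred_decision
#   >>> losses[losses <= 0] = 0        # correct, confident predictions cost 0
#   >>> np.mean(losses)                # -> 0.30..., as in the docstring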
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
Across all items in a set N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
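# --- Illustrative sketch (not part of the original module) ------------------
# The Brier score above is the mean squared difference between the predicted
# probability of the positive class and the 0/1 outcome.  A minimal sketch
# reproducing the docstring value, assuming only NumPy:
#
#   >>> import numpy as np
#   >>> y_true = np.array([0, 1, 1, 0])
#   >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
#   >>> np.mean((y_true - y_prob) ** 2)   # -> 0.0375, i.e. 0.037...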
|
bsd-3-clause
|
krez13/scikit-learn
|
sklearn/naive_bayes.py
|
29
|
28917
|
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
        I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
        shape [n_samples, n_classes].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
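# --- Illustrative sketch (not part of the original module) ------------------
# predict_log_proba above normalizes the joint log-likelihood with the
# log-sum-exp trick: log P(c|x) = log P(c, x) - log(sum_c' P(c', x)).
# A minimal sketch using only NumPy (sklearn uses its own logsumexp helper):
#
#   >>> import numpy as np
#   >>> jll = np.array([[-1.0, -2.0, -3.0]])          # one sample, 3 classes
#   >>> m = jll.max(axis=1, keepdims=True)
#   >>> log_prob_x = m + np.log(np.exp(jll - m).sum(axis=1, keepdims=True))
#   >>> log_proba = jll - log_prob_x
#   >>> np.exp(log_proba).sum(axis=1)                 # rows sum to 1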
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.
        X : array-like, shape (n_samples, n_features)
            New data points used to update the running mean and variance.
        sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
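    # --- Illustrative sketch (not part of the original module) --------------
    # The online update above can be checked against a batch computation:
    # combining two batches with _update_mean_variance gives the same mean and
    # variance as np.mean/np.var on the concatenated data.  A minimal sketch,
    # assuming the public import path sklearn.naive_bayes.GaussianNB:
    #
    #   >>> import numpy as np
    #   >>> from sklearn.naive_bayes import GaussianNB
    #   >>> X1 = np.array([[1.], [2.], [3.]]); X2 = np.array([[4.], [5.]])
    #   >>> mu, var = GaussianNB._update_mean_variance(
    #   ...     X1.shape[0], X1.mean(axis=0), X1.var(axis=0), X2)
    #   >>> mu, var          # -> (array([3.]), array([2.]))
    #   >>> X = np.vstack([X1, X2]); X.mean(axis=0), X.var(axis=0)   # same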
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
        _refit : bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
epsilon = 1e-9 * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
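# --- Illustrative sketch (not part of the original module) ------------------
# The per-class term computed in GaussianNB._joint_log_likelihood above is the
# log prior plus, for each feature, the log of a univariate normal density
# log N(x | theta, sigma) = -0.5*log(2*pi*sigma) - 0.5*(x - theta)**2 / sigma,
# where sigma_ stores variances (not standard deviations).  A minimal sketch,
# assuming only NumPy:
#
#   >>> import numpy as np
#   >>> x, theta, sigma = 0.5, 0.0, 2.0
#   >>> logpdf = -0.5 * np.log(2. * np.pi * sigma) - 0.5 * (x - theta) ** 2 / sigma
#   >>> round(logpdf, 3)    # -> -1.328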
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
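# --- Illustrative sketch (not part of the original module) ------------------
# The smoothing in MultinomialNB._update_feature_log_prob above is plain
# Lidstone/Laplace smoothing: P(x_i|y) = (N_yi + alpha) / (N_y + alpha * n_features).
# A minimal sketch, assuming only NumPy:
#
#   >>> import numpy as np
#   >>> feature_count = np.array([[2., 0., 1.]])   # one class, three features
#   >>> alpha = 1.0
#   >>> smoothed_fc = feature_count + alpha
#   >>> probs = smoothed_fc / smoothed_fc.sum(axis=1).reshape(-1, 1)
#   >>> probs                 # -> array([[ 0.5, 0.1666..., 0.3333...]])
#   >>> # feature_log_prob_ is np.log(probs)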
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
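# --- Illustrative sketch (not part of the original module) ------------------
# The rearrangement used in BernoulliNB._joint_log_likelihood above relies on
# sum_i [x_i*log(p_i) + (1 - x_i)*log(1 - p_i)]
#   == x . (log(p) - log(1 - p)) + sum_i log(1 - p_i).
# A minimal sketch checking the identity, assuming only NumPy:
#
#   >>> import numpy as np
#   >>> x = np.array([1., 0., 1.])
#   >>> log_p = np.log(np.array([0.7, 0.2, 0.5]))
#   >>> neg_prob = np.log(1 - np.exp(log_p))
#   >>> direct = np.sum(x * log_p + (1 - x) * neg_prob)
#   >>> rearranged = np.dot(x, log_p - neg_prob) + neg_prob.sum()
#   >>> np.allclose(direct, rearranged)    # -> True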
|
bsd-3-clause
|
jorge2703/scikit-learn
|
examples/classification/plot_classification_probability.py
|
242
|
2624
|
"""
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities:
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
|
bsd-3-clause
|
tgsmith61591/pyramid
|
pmdarima/model_selection/_validation.py
|
1
|
13410
|
# -*- coding: utf-8 -*-
"""
Cross-validation for ARIMA and pipeline estimators.
See: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
""" # noqa: E501
import numpy as np
import numbers
import warnings
import time
from traceback import format_exception_only
from sklearn import base
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.utils import indexable
from ._split import check_cv
from .. import metrics
from ..utils import check_endog
from ..warnings import ModelFitWarning
from ..compat.sklearn import safe_indexing
from ..compat import pmdarima as pm_compat
__all__ = [
'cross_validate',
'cross_val_predict',
'cross_val_score',
]
_valid_scoring = {
'mean_absolute_error': mean_absolute_error,
'mean_squared_error': mean_squared_error,
'smape': metrics.smape,
}
_valid_averaging = {
'mean': np.nanmean,
'median': np.nanmedian,
}
def _check_callables(x, dct, varname):
if callable(x):
return x
if isinstance(x, str):
try:
return dct[x]
except KeyError:
valid_keys = list(dct.keys())
raise ValueError('%s can be a callable or a string in %s'
% (varname, str(valid_keys)))
raise TypeError('expected a callable or a string, but got %r (type=%s)'
% (x, type(x)))
def _check_averaging(method):
return _check_callables(method, _valid_averaging, "averaging")
def _check_scoring(metric):
return _check_callables(metric, _valid_scoring, "metric")
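# Editor's illustrative sketch (not part of pmdarima): how the helpers above
# resolve their arguments. Registered strings map to the functions in the
# dictionaries, callables pass through unchanged, and unknown keys raise.
def _demo_check_callables():
    assert _check_scoring('smape') is metrics.smape
    assert _check_scoring(mean_absolute_error) is mean_absolute_error
    assert _check_averaging('median') is np.nanmedian
    try:
        _check_scoring('not-a-metric')
    except ValueError:
        pass  # unknown keys are rejected with the list of valid names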
def _safe_split(y, X, train, test):
"""Performs the CV indexing given the indices"""
y_train, y_test = y.take(train), y.take(test)
if X is None:
X_train = X_test = None
else:
X_train, X_test = safe_indexing(X, train), safe_indexing(X, test)
return y_train, y_test, X_train, X_test
def _fit_and_score(fold, estimator, y, X, scorer, train, test, verbose,
error_score):
"""Fit estimator and compute scores for a given dataset split."""
msg = 'fold=%i' % fold
if verbose > 1:
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
start_time = time.time()
y_train, y_test, X_train, X_test = _safe_split(y, X, train, test)
try:
estimator.fit(y_train, X=X_train)
except Exception as e:
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
else:
test_scores = error_score
warnings.warn("Estimator fit failed. The score on this train-test "
"partition will be set to %f. Details: \n%s"
% (error_score,
format_exception_only(type(e), e)[0]),
ModelFitWarning)
else:
fit_time = time.time() - start_time
# forecast h periods into the future and compute the score
preds = estimator.predict(n_periods=len(test), X=X_test)
test_scores = scorer(y_test, preds)
score_time = time.time() - start_time - fit_time
if verbose > 2:
total_time = score_time + fit_time
msg += ", score=%.3f [time=%.3f sec]" % (test_scores, total_time)
print(msg)
# TODO: if we ever want train scores, we'll need to change this signature
return test_scores, fit_time, score_time
def _fit_and_predict(fold, estimator, y, X, train, test, verbose):
"""Fit estimator and compute scores for a given dataset split."""
msg = 'fold=%i' % fold
if verbose > 1:
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
start_time = time.time()
y_train, _, X_train, X_test = _safe_split(y, X, train, test)
# scikit doesn't handle failures on cv predict, so we won't either.
estimator.fit(y_train, X=X_train)
fit_time = time.time() - start_time
# forecast h periods into the future
start_time = time.time()
preds = estimator.predict(n_periods=len(test), X=X_test)
pred_time = time.time() - start_time
if verbose > 2:
total_time = pred_time + fit_time
msg += " [time=%.3f sec]" % (total_time)
print(msg)
return preds, test
def cross_validate(estimator,
y,
X=None,
scoring=None,
cv=None,
verbose=0,
error_score=np.nan,
**kwargs): # TODO: remove kwargs
"""Evaluate metric(s) by cross-validation and also record fit/score times.
Parameters
----------
estimator : estimator
An estimator object that implements the ``fit`` method
y : array-like or iterable, shape=(n_samples,)
The time-series array.
X : array-like, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d array of exogenous variables.
scoring : str or callable, optional (default=None)
The scoring metric to use. If a callable, must adhere to the signature
``metric(true, predicted)``. Valid string scoring metrics include:
- 'smape'
- 'mean_absolute_error'
- 'mean_squared_error'
cv : BaseTSCrossValidator or None, optional (default=None)
An instance of cross-validation. If None, will use a RollingForecastCV
verbose : integer, optional
The verbosity level.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, ModelFitWarning is raised. This parameter
does not affect the refit step, which will always raise the error.
"""
# Temporary shim until we remove `exogenous` support completely
X, _ = pm_compat.get_X(X, **kwargs)
y, X = indexable(y, X)
y = check_endog(y, copy=False)
cv = check_cv(cv)
scoring = _check_scoring(scoring)
# validate the error score
if not (error_score == "raise" or isinstance(error_score, numbers.Number)):
raise ValueError('error_score should be the string "raise" or a '
'numeric value')
    # TODO: in the future we might consider joblib for parallelizing, but it
    # could cause cross-thread issues with parallelism.
results = [
_fit_and_score(fold,
base.clone(estimator),
y,
X,
scorer=scoring,
train=train,
test=test,
verbose=verbose,
error_score=error_score)
for fold, (train, test) in enumerate(cv.split(y, X))]
scores, fit_times, score_times = list(zip(*results))
ret = {
'test_score': np.array(scores),
'fit_time': np.array(fit_times),
'score_time': np.array(score_times),
}
return ret
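# Editor's illustrative sketch (not part of pmdarima): minimal use of
# cross_validate. The dataset and CV settings mirror the cross_val_predict
# example below; they are illustrative assumptions, not library defaults.
def _demo_cross_validate():
    import pmdarima as pm
    from pmdarima.model_selection import RollingForecastCV
    y = pm.datasets.load_wineind()
    cv = RollingForecastCV(h=14, step=12)
    res = cross_validate(pm.ARIMA((1, 1, 2), seasonal_order=(0, 1, 1, 12)),
                         y, scoring='smape', cv=cv)
    # One score / timing entry per fold under each key.
    print(sorted(res.keys()))  # ['fit_time', 'score_time', 'test_score']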
def cross_val_predict(estimator,
y,
X=None,
cv=None,
verbose=0,
averaging="mean",
**kwargs): # TODO: remove kwargs
"""Generate cross-validated estimates for each input data point
Parameters
----------
estimator : estimator
An estimator object that implements the ``fit`` method
y : array-like or iterable, shape=(n_samples,)
The time-series array.
X : array-like, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d array of exogenous variables.
cv : BaseTSCrossValidator or None, optional (default=None)
An instance of cross-validation. If None, will use a RollingForecastCV.
Note that for cross-validation predictions, the CV step cannot exceed
the CV horizon, or there will be a gap between fold predictions.
verbose : integer, optional
The verbosity level.
averaging : str or callable, one of ["median", "mean"] (default="mean")
Unlike normal CV, time series CV might have different folds (windows)
forecasting the same time step. After all forecast windows are made,
we build a matrix of y x n_folds, populating each fold's forecasts like
so::
nan nan nan # training samples
nan nan nan
nan nan nan
nan nan nan
1 nan nan # test samples
4 3 nan
3 2.5 3.5
nan 6 5
nan nan 4
We then average each time step's forecasts to end up with our final
prediction results.
Examples
--------
>>> import pmdarima as pm
>>> from pmdarima.model_selection import cross_val_predict,\
... RollingForecastCV
>>> y = pm.datasets.load_wineind()
>>> cv = RollingForecastCV(h=14, step=12)
>>> preds = cross_val_predict(
... pm.ARIMA((1, 1, 2), seasonal_order=(0, 1, 1, 12)), y, cv=cv)
>>> preds[:5]
array([30710.45743168, 34902.94929722, 17994.16587163, 22127.71167249,
25473.60876435])
"""
# Temporary shim until we remove `exogenous` support completely
X, _ = pm_compat.get_X(X, **kwargs)
y, X = indexable(y, X)
y = check_endog(y, copy=False)
cv = check_cv(cv)
avgfunc = _check_averaging(averaging)
# need to be careful here:
# >>> cv = RollingForecastCV(step=6, h=4)
# >>> cv_generator = cv.split(wineind)
# >>> next(cv_generator)
# (array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
# 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
# 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
# 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57]),
# array([58, 59, 60, 61]))
# >>> next(cv_generator)
# (array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
# 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
# 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
# 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
# 60, 61, 62, 63]),
# array([64, 65, 66, 67])) <~~ 64 vs. 61
if cv.step > cv.horizon:
raise ValueError("CV step cannot be > CV horizon, or there will be a "
"gap in predictions between folds")
# clone estimator to make sure all folds are independent
prediction_blocks = [
_fit_and_predict(fold,
base.clone(estimator),
y,
X,
train=train,
test=test,
verbose=verbose,) # TODO: fit params?
for fold, (train, test) in enumerate(cv.split(y, X))]
# Unlike normal CV, time series CV might have different folds (windows)
# forecasting the same time step. In this stage, we build a matrix of
# y x n_folds, populating each fold's forecasts like so:
pred_matrix = np.ones((y.shape[0], len(prediction_blocks))) * np.nan
for i, (pred_block, test_indices) in enumerate(prediction_blocks):
pred_matrix[test_indices, i] = pred_block
# from there, we need to apply nanmean (or some other metric) along rows
# to agree on a forecast for a sample.
test_mask = ~(np.isnan(pred_matrix).all(axis=1))
predictions = pred_matrix[test_mask]
return avgfunc(predictions, axis=1)
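# Editor's illustrative sketch (not part of pmdarima): the fold-averaging
# step above on a tiny hand-built prediction matrix. Rows are time steps,
# columns are folds; rows that only ever appear in training sets stay NaN
# and are dropped, while overlapping forecasts are reconciled by the
# averaging function.
def _demo_fold_averaging():
    pred_matrix = np.array([
        [np.nan, np.nan],   # training-only sample -> dropped
        [1.0,    np.nan],   # forecast by fold 0 only
        [4.0,    2.0],      # forecast by both folds -> averaged to 3.0
        [np.nan, 5.0],      # forecast by fold 1 only
    ])
    test_mask = ~(np.isnan(pred_matrix).all(axis=1))
    print(np.nanmean(pred_matrix[test_mask], axis=1))  # [1. 3. 5.]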
def cross_val_score(estimator,
y,
X=None,
scoring=None,
cv=None,
verbose=0,
error_score=np.nan,
**kwargs): # TODO: remove kwargs
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator
An estimator object that implements the ``fit`` method
y : array-like or iterable, shape=(n_samples,)
The time-series array.
X : array-like, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d array of exogenous variables.
scoring : str or callable, optional (default=None)
The scoring metric to use. If a callable, must adhere to the signature
``metric(true, predicted)``. Valid string scoring metrics include:
- 'smape'
- 'mean_absolute_error'
- 'mean_squared_error'
cv : BaseTSCrossValidator or None, optional (default=None)
An instance of cross-validation. If None, will use a RollingForecastCV
verbose : integer, optional
The verbosity level.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, ModelFitWarning is raised. This parameter
does not affect the refit step, which will always raise the error.
"""
# Temporary shim until we remove `exogenous` support completely
X, _ = pm_compat.get_X(X, **kwargs)
cv_results = cross_validate(estimator=estimator,
y=y,
X=X,
scoring=scoring,
cv=cv,
verbose=verbose,
error_score=error_score)
return cv_results['test_score']
|
mit
|
kjung/scikit-learn
|
examples/linear_model/plot_bayesian_ridge.py
|
50
|
2733
|
"""
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
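# Editor's illustrative aside (not in the original example): the fitted
# BayesianRidge also exposes the estimated noise precision ``alpha_`` and
# weight precision ``lambda_`` (scikit-learn attributes not shown above),
# which can be compared with the simulated values alpha_ = 50, lambda_ = 4.
print("Estimated noise precision alpha_: %f" % clf.alpha_)
print("Estimated weight precision lambda_: %f" % clf.lambda_)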
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
lw = 2
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='lightgreen', linewidth=lw,
label="Bayesian Ridge estimate")
plt.plot(w, color='gold', linewidth=lw, label="Ground truth")
plt.plot(ols.coef_, color='navy', linestyle='--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='gold', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='navy', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="upper left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=lw)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
bsd-3-clause
|
jergosh/slr_pipeline
|
bin/calc_aln_dists.py
|
1
|
6041
|
import glob
import argparse
from os import path
from Bio import SeqIO
from Bio import AlignIO
import dendropy
import pandas
import numpy as np
np.set_printoptions(threshold=np.nan)
import bpp
from phylo_utils import likelihood
from phylo_utils import models
from phylo_utils import markov
from phylo_utils.seq_to_partials import seq_to_partials
from Bio.SeqUtils.ProtParam import ProteinAnalysis
from Bio.SubsMat.MatrixInfo import pam250
# import pairdist
import bpp
AAs = [ 'A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'W', 'V', 'Y' ]
subst_matrix = np.zeros((20, 20))
for x in range(20):
for y in range(20):
if (AAs[x], AAs[y]) in pam250:
subst_matrix[x, y] = pam250[(AAs[x], AAs[y])]
else:
subst_matrix[x, y] = pam250[(AAs[y], AAs[x])]
def site_freqs(aln):
site_freqs = []
for i in range(aln.get_alignment_length()):
freqs = ProteinAnalysis(''.join(aln[:, i])).get_amino_acids_percent()
freqs = np.array([ freqs[aa] for aa in AAs ])
if sum(freqs) == 0.0:
freqs = np.array([ 1.0 for _ in range(20) ])
else:
freqs /= np.sum(freqs)
site_freqs.append(freqs)
return np.array(site_freqs)
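def _demo_site_freqs():
    # Editor's illustrative sketch (not part of the original script):
    # site_freqs() turns an alignment into one 20-dim amino-acid profile per
    # column; a fully conserved column puts all of its weight on one residue.
    # Assumes Bio.Align/Bio.SeqRecord are available from the same Biopython
    # install used by the imports above.
    from Bio.Align import MultipleSeqAlignment
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    aln = MultipleSeqAlignment([SeqRecord(Seq("ACD"), id="s1"),
                                SeqRecord(Seq("AGD"), id="s2")])
    freqs = site_freqs(aln)
    print freqs.shape                 # (3, 20): one profile per column
    print freqs[0][AAs.index('A')]    # 1.0 -- column 0 is all 'A'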
def process_family_bpp(df, domaindir, treedir, dataset_map):
# TODO New, tree-like calculation
# - calculate the partials at the root of each family,
# using branch lengths from the
# - estimate the branch length using pairs of partials
res_cath_id, res_id1, res_id2, res_dist = [], [], [], []
cath_id = df.cath_id.iloc[0]
all_ids = []
seqs = []
print cath_id
for i, l in df.iterrows():
if l.stable_id not in dataset_map:
continue
dataset = dataset_map[l.stable_id]
prefix = dataset.partition('_')[0][:2]
aln_file = path.join(domaindir,
prefix,
l.stable_id+'_'+dataset+'.fa')
aln = SeqIO.to_dict(SeqIO.parse(aln_file, 'fasta'))
tree_file = path.join(treedir,
prefix,
dataset+'.nh')
tree = dendropy.Tree.get_from_path(tree_file, schema='newick')
partials_dict = {}
for seqr in aln.values():
partials_dict[seqr.id] = seq_to_partials(str(seqr.seq), 'protein')
TreeLn = likelihood.RunOnTree(markov.TransitionMatrix(models.LG()), partials_dict)
TreeLn.set_tree(tree)
print i, TreeLn.run()
print TreeLn.model
if len(seqs) < 2:
return
aln = bpp.Alignment(seqs, 'protein')
aln.set_substitution_model("WAG01")
aln.set_constant_rate_model()
aln.compute_distances()
dists = aln.get_distances()
for i_1, i_2 in zip(*[ list(i) for i in np.triu_indices(len(all_ids), 1) ]):
id_1 = seqs[i_1][0]
id_2 = seqs[i_2][0]
res_cath_id.append(cath_id)
res_id1.append(id_1)
res_id2.append(id_2)
res_dist.append(dists[i_1, i_2])
return pandas.DataFrame(data={ "id_1": res_id1,
"id_2": res_id2,
"dist": res_dist,
"cath_id": res_cath_id })
def process_family(df, domaindir, dataset_map):
# FIXME How to make sure we don't have to reorder the pairs to identify them
# perhaps reorder post-hoc in R
res_cath_id, res_id1, res_id2, res_dist = [], [], [], []
cath_id = df.cath_id.iloc[0]
profile_dict = {}
print cath_id
for i, l in df.iterrows():
if l.stable_id not in dataset_map:
continue
dataset = dataset_map[l.stable_id]
aln_file = path.join(domaindir,
dataset.partition('_')[0][:2],
l.stable_id+'_'+dataset+'.fa')
aln = AlignIO.read(aln_file, 'fasta')
freqs = site_freqs(aln)
profile_dict[l.stable_id] = freqs
all_ids = profile_dict.keys()
if len(all_ids) < 2:
return
for id_1, id_2 in zip(*[ list(i) for i in np.triu_indices(len(all_ids), 1) ]):
aln_1 = np.transpose(profile_dict[all_ids[id_1]])
aln_2 = profile_dict[all_ids[id_2]]
if len(aln_1.shape) == 1 or len(aln_2.shape) == 1:
continue
dist = 0.0
for i in range(aln_1.shape[1]):
dist += np.dot(np.dot(aln_1[:, i], subst_matrix), aln_2[i, :])
dist /= aln_1.shape[1]
res_cath_id.append(cath_id)
res_id1.append(all_ids[id_1])
res_id2.append(all_ids[id_2])
res_dist.append(dist)
return pandas.DataFrame(data={ "id_1": res_id1,
"id_2": res_id2,
"dist": res_dist,
"cath_id": res_cath_id })
argparser = argparse.ArgumentParser()
argparser.add_argument('--domaindir', metavar="dir", type=str, required=True)
argparser.add_argument('--treedir', metavar="dir", type=str, required=True)
argparser.add_argument('--dataset_map', metavar="file", type=str, required=True)
argparser.add_argument('--cath_map', metavar="file", type=str, required=True)
argparser.add_argument('--outfile', metavar="output_file", type=str, required=True)
def main():
args = argparser.parse_args()
dataset_map = {}
for l in open(args.dataset_map):
f = l.rstrip().split('\t')
dataset_map[f[0]] = f[1]
cath_map = pandas.read_table(args.cath_map, sep='\t',
names=["stable_id", "coords", "cath_id", "pdb_id"])
# For each protein family, iterate over all pairs
all_dists = cath_map.groupby('cath_id').apply(process_family_bpp,
args.domaindir, args.treedir,
dataset_map)
all_dists.to_csv(args.outfile, sep='\t', quoting=False, index=False)
if __name__ == "__main__":
main()
|
gpl-2.0
|
LangmuirSim/langmuir
|
LangmuirPython/plot/plotRDF.py
|
2
|
7329
|
# -*- coding: utf-8 -*-
"""
@author: adam
"""
import matplotlib.pyplot as plt
import langmuir as lm
import numpy as np
import argparse
import os
desc = """
Plot output from rdf.py
"""
def get_arguments(args=None):
parser = argparse.ArgumentParser()
parser.description = desc
parser.add_argument(dest='ifile', default='rdf.pkl', type=str, nargs='?',
metavar='input', help='input file')
parser.add_argument('--stub', default='', type=str, metavar='stub',
help='output file stub')
parser.add_argument('--ext', default='pdf', type=str, metavar='str',
choices=['png', 'pdf', 'jpg'], help='output file type')
parser.add_argument('--figure', default=(6.0, 6.0, 1.0, 1.0, 1.0, 1.0),
help='figure dimensions (w, h, l, r, t, b)')
parser.add_argument('--r1' , action='store_true', help='r1')
parser.add_argument('--w1' , action='store_true', help='w1')
parser.add_argument('--w2' , action='store_true', help='w2')
parser.add_argument('--d12', action='store_true', help='w2 - w1')
parser.add_argument('--d21', action='store_true', help='w1 - w2')
parser.add_argument('--s12', action='store_true', help='w1 + w2')
parser.add_argument('--all', action='store_true', help='all')
parser.add_argument('--normalize', action='store_true',
help='normalize to [0, 1]')
parser.add_argument('--detrend', action='store_true',
help='subtract average')
parser.add_argument('--abs', action='store_true',
help='take absolute value')
parser.add_argument('--rscale', type=float, default=1.0, help='scale r1')
parser.add_argument('--bins', type=int, default=256, help='number of bins')
parser.add_argument('--title' , default='')
parser.add_argument('--xlabel' , default='$\Delta r (nm)$')
parser.add_argument('--ylabel' , default='')
parser.add_argument('--xlim', default=(None, None), nargs=2, type=float)
parser.add_argument('--xmax', default=None, type=float)
parser.add_argument('--xmin', default=None, type=float)
parser.add_argument('--ylim', default=(None, None), nargs=2, type=float)
parser.add_argument('--ymax', default=None, type=float)
parser.add_argument('--ymin', default=None, type=float)
parser.add_argument('--xmult', default=None, type=float,
help='xtick multiple')
parser.add_argument('--ymult', default=None, type=float,
help='ytick multiple')
parser.add_argument('--xmaxn', default=4, type=int, help='xtick maxn')
parser.add_argument('--ymaxn', default=4, type=int, help='ytick maxn')
parser.add_argument('--fontsize' , default='large')
parser.add_argument('--labelsize', default=None)
parser.add_argument('--ticksize' , default=None)
parser.add_argument('--titlesize', default=None)
parser.add_argument('--legend', action='store_true', help='show legend')
parser.add_argument('--legendsize', default='xx-small')
parser.add_argument('--loc', default='best')
parser.add_argument('--show', action='store_true', help='show plot')
parser.add_argument('--save', action='store_true', help='save plot')
opts = parser.parse_args(args)
if opts.titlesize is None: opts.titlesize = opts.fontsize
if opts.labelsize is None: opts.labelsize = opts.fontsize
if opts.ticksize is None: opts.ticksize = opts.fontsize
if opts.xmin: opts.xlim[0] = opts.xmin
if opts.xmax: opts.xlim[1] = opts.xmax
if opts.ymin: opts.ylim[0] = opts.ymin
if opts.ymax: opts.ylim[1] = opts.ymax
opts.xmin, opts.xmax = opts.xlim
opts.ymin, opts.ymax = opts.ylim
if opts.all:
opts.w1 = True
opts.w2 = True
opts.d12 = True
opts.d21 = True
opts.s12 = True
popts = [opts.w1, opts.w2, opts.d12, opts.d21, opts.s12, opts.r1]
if not any(popts):
        raise RuntimeError, 'must use --w1, --w2, etc to choose plot type'
if not opts.show and not opts.save:
opts.show = True
return opts
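# Editor's illustrative sketch (not part of the original script): the
# weighted-histogram trick used in __main__ below. Dividing a weight
# histogram by the unweighted count histogram gives the mean weight per
# radial bin, i.e. a radial profile of w(r).
def _demo_radial_average():
    import numpy as np
    r = np.array([0.1, 0.2, 0.6, 0.7, 0.8])
    w = np.array([1.0, 3.0, 2.0, 4.0, 6.0])
    counts_w, edges = np.histogram(r, bins=2, weights=w)
    counts_r, edges = np.histogram(r, bins=2)
    print counts_w / counts_r    # mean w per bin: 2.0 and 4.0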
if __name__ == '__main__':
work = os.getcwd()
opts = get_arguments()
data = lm.common.load_pkl(opts.ifile)
image = data['image']
r1 = data['r1'] * opts.rscale
w1 = data['w1']
w2 = data['w2']
d12 = w2 - w1
d21 = w1 - w2
s12 = w1 + w2
kwargs = dict(bins=opts.bins)
counts_w1, edges = np.histogram(r1, weights=w1, **kwargs)
counts_w2, edges = np.histogram(r1, weights=w2, **kwargs)
counts_w3, edges = np.histogram(r1, weights=d21, **kwargs)
counts_w4, edges = np.histogram(r1, weights=d12, **kwargs)
counts_w5, edges = np.histogram(r1, weights=s12, **kwargs)
counts_r1, edges = np.histogram(r1, **kwargs)
counts_w1 = counts_w1 / counts_r1
counts_w2 = counts_w2 / counts_r1
counts_w3 = counts_w3 / counts_r1
counts_w4 = counts_w4 / counts_r1
counts_w5 = counts_w5 / counts_r1
if opts.abs:
counts_w1 = abs(counts_w1)
counts_w2 = abs(counts_w2)
counts_w3 = abs(counts_w3)
counts_w4 = abs(counts_w4)
counts_w5 = abs(counts_w5)
if opts.normalize:
counts_w1 = lm.surface.linear_mapping(counts_w1, 0, 1)
counts_w2 = lm.surface.linear_mapping(counts_w2, 0, 1)
counts_w3 = lm.surface.linear_mapping(counts_w3, 0, 1)
counts_w4 = lm.surface.linear_mapping(counts_w4, 0, 1)
counts_w5 = lm.surface.linear_mapping(counts_w5, 0, 1)
if opts.detrend:
counts_w1 = counts_w1 - np.average(counts_w1)
counts_w2 = counts_w2 - np.average(counts_w2)
counts_w3 = counts_w3 - np.average(counts_w3)
counts_w4 = counts_w4 - np.average(counts_w4)
counts_w5 = counts_w5 - np.average(counts_w5)
edges = 0.5 * (edges[1:] + edges[:-1])
fig, ax1 = lm.plot.subplots(1, 1, *opts.figure)
lm.plot.title(opts.title, fontsize=opts.titlesize)
plt.xlabel(opts.xlabel, size=opts.labelsize)
plt.ylabel(opts.ylabel, size=opts.labelsize)
plt.tick_params(labelsize=opts.ticksize)
if opts.w1:
plt.plot(edges, counts_w1, color=lm.plot.colors.r1, label='$(w1)$')
if opts.w2:
plt.plot(edges, counts_w2, color=lm.plot.colors.o1, label='$(w2)$')
if opts.d21:
plt.plot(edges, counts_w3, color=lm.plot.colors.y1, label='$(w1-w2)$')
if opts.d12:
plt.plot(edges, counts_w4, color=lm.plot.colors.g2, label='$(w2-w1)$')
if opts.s12:
plt.plot(edges, counts_w5, color=lm.plot.colors.b1, label='$(w1+w2)$')
if opts.r1:
plt.plot(edges, counts_r1, color='k', label='$(r1)$')
if opts.legend:
plt.legend(loc=opts.loc, fontsize=opts.legendsize, frameon=False)
if opts.xmax is None:
opts.xmax = np.amax(r1)
if opts.xmin is None:
opts.xmin = 0
plt.xlim(opts.xmin, opts.xmax)
plt.ylim(opts.ymin, opts.ymax)
lm.plot.maxn_locator(x=opts.xmaxn)
lm.plot.maxn_locator(y=opts.ymaxn)
if opts.xmult: lm.plot.multiple_locator(x=opts.xmult)
if opts.ymult: lm.plot.multiple_locator(y=opts.ymult)
ax1.yaxis.get_offset_text().set_size(opts.ticksize)
if opts.save:
handle = lm.common.format_output(stub=opts.stub, name='rdf',
ext=opts.ext)
print 'saved: %s' % handle
lm.plot.save(handle)
if opts.show:
plt.show()
|
gpl-2.0
|
demis001/bio_pieces
|
bio_pieces/group_references.py
|
3
|
2678
|
'''
Usage: group_references <samfile> [--outdir <DIR>]
Options:
--outdir=<DIR>,-o=<DIR> output directory [Default: group_references_out]
Create separate fastq files for each reference in a samfile.
'''
from docopt import docopt
import pandas as pd
from schema import Schema, Use
import sys
import os
import sh
if sys.version[0] == '3':
from io import StringIO as BytesIO
else:
from io import BytesIO
import string
import re
sam_columns = ["QNAME", "FLAG", "RNAME", "POS", "MAPQ", "CIGAR", "RNEXT", "PNEXT", "TLEN", "SEQ", "QUAL"]
def samview_to_df(rawtext):
'''
:param str rawtext: text of .sam file or output of `samtools view`
:return: pandas.DataFrame
'''
samtext = '\n'.join( fixline(row) for row in str(rawtext).split('\n') )
as_bytes = BytesIO(samtext)
    # TODO: write a test to ensure this handles a variable number of columns
return pd.read_csv(as_bytes, names=sam_columns, usecols=sam_columns, delimiter='\t', header=None, squeeze=True)
def fixline(row):
'''
Fix lines of `samtools view` which are unmapped so they can be easily parsed by pandas.
    :param str row: a line from `samtools view`
:return: fixed line
'''
newrow = []
cols = row.split('\t')[:len(sam_columns)]
if len(cols) > 1 and cols[2] == '*':
cols[2] = 'unmapped'
return '\t'.join(cols)
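def _demo_fixline():
    # Editor's illustrative sketch (not part of the original module):
    # an unmapped record (RNAME == '*') is relabelled 'unmapped' so that the
    # later groupby('RNAME') collects unmapped reads under one explicit key.
    line = "read1\t4\t*\t0\t0\t*\t*\t0\t0\tACGT\tIIII"
    assert fixline(line).split('\t')[2] == 'unmapped'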
def get_seqs_by_ctg(outdir, rawtext):
'''
    Splits a given sam view into separate fastq files, grouped by reference (the first column, RNAME), and saves them to outdir.
:param str outdir: directory result fastqs are saved to
:param str rawtext: output of `samtools view`
:return: None
'''
sam_df = samview_to_df(rawtext)
contig_groups = sam_df.groupby('RNAME')
fastq = "@{0}\n{1}\n+\n{2}".format
for group in contig_groups:
_ref, reads = group[0], group[1]
ref = re.sub('[%s]' % string.punctuation, '_', _ref)
with open("{0}/{1}.group.fq".format(outdir, ref), 'w') as out:
            # join all records for this reference and write them in one go
            # (a lazy map over out.writelines would write nothing on Python 3)
            out.write('\n'.join(map(fastq, reads.QNAME, reads.SEQ, reads.QUAL)))
out.write('\n')
def main():
'''
Call `samtools view` on the input file and split into fastqs by RNAME column.
'''
raw_args = docopt(__doc__)
scheme = Schema({
'<samfile>' : str,
'--outdir' : str})
parsed_args = scheme.validate(raw_args)
outdir = parsed_args['--outdir']
if not os.path.exists(outdir):
os.mkdir(outdir)
infile = parsed_args['<samfile>']
view = str(sh.samtools('view', infile, S=True)) if infile.endswith('.sam') else str(sh.samtools('view', infile))
get_seqs_by_ctg(outdir, view)
return 0
if __name__ == '__main__':
main()
|
gpl-2.0
|
cwu2011/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
241
|
1761
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
|
bsd-3-clause
|
liberatorqjw/scikit-learn
|
examples/semi_supervised/plot_label_propagation_structure.py
|
247
|
2432
|
"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
|
bsd-3-clause
|
neilzim/SCDA
|
old_notebooks/test_ampl_wrapper_irisao.py
|
1
|
1359
|
#!/usr/bin/env python3
"""
Test the functionality of the core SCDA
02/14/2016 -- created by NTZ
"""
import scda
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
if __name__ == "__main__":
scda.configure_log("wrapper_test.log")
test_dir = "test_scda_aplc" # nominal destination for new AMPL programs
aux_dir = "~/SCDA/2d AMPL script - half pupil"
fileorg = {'work dir': test_dir, 'TelAp dir': aux_dir, 'FPM dir': aux_dir, 'LS dir': aux_dir,
'TelAp fname': "IRISAO_N=0150_center_half_spiders3=01_gapID=10_BW.dat",
'FPM fname': "CircPupil_N=0050_obs=00_center_half.dat",
'LS fname': "IRISAO-0_N=0150_center_half_spiders3=02_ID=20_OD=098.dat"}
pupil_params = {'N': 150}
# fpm_params = {'rad': 9.898/2, 'M':50}
fpm_params = {'rad': 6.466/2, 'M':50}
# ls_params = {'id': 10, 'od': 0.9}
ls_params = {}
image_params = {'c': 8., 'iwa':3., 'owa':10.}
design_params = {'Pupil': pupil_params, 'FPM': fpm_params, 'LS': ls_params, 'Image': image_params}
# solver_params = {'method': 'bar', 'presolve': False, 'Nthreads': 8}
solver_params = {}
irisao_coron = scda.HalfplaneAPLC(fileorg=fileorg, design=design_params, solver=solver_params, verbose=True)
irisao_coron.write_ampl(ampl_src_fname="irisao_hpaplc_C08.mod", overwrite=True)
|
mit
|
ThiagoGarciaAlves/intellij-community
|
python/helpers/pydev/_pydev_bundle/pydev_console_utils.py
|
3
|
24235
|
import os
import sys
import traceback
from _pydev_bundle.pydev_imports import xmlrpclib, _queue, Exec
from _pydev_bundle._pydev_calltip_util import get_description
from _pydev_imps._pydev_saved_modules import thread
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle.pydevd_constants import IS_JYTHON, dict_iter_items
from _pydevd_bundle.pydevd_utils import to_string
try:
import cStringIO as StringIO #may not always be available @UnusedImport
except:
try:
import StringIO #@Reimport
except:
import io as StringIO
# =======================================================================================================================
# Null
# =======================================================================================================================
class Null:
"""
Gotten from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205
"""
def __init__(self, *args, **kwargs):
return None
def __call__(self, *args, **kwargs):
return self
def __getattr__(self, mname):
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
def __repr__(self):
return "<Null>"
def __str__(self):
return "Null"
def __len__(self):
return 0
def __getitem__(self):
return self
def __setitem__(self, *args, **kwargs):
pass
def write(self, *args, **kwargs):
pass
def __nonzero__(self):
return 0
# =======================================================================================================================
# BaseStdIn
# =======================================================================================================================
class BaseStdIn:
def __init__(self, original_stdin=sys.stdin, *args, **kwargs):
try:
self.encoding = sys.stdin.encoding
except:
# Not sure if it's available in all Python versions...
pass
self.original_stdin = original_stdin
def readline(self, *args, **kwargs):
        # sys.stderr.write('Cannot readline out of the console evaluation\n') -- don't show anything.
        # This could happen if the user had called input('enter number') -- upon entering it, that
        # message would appear, which is not something we want.
return '\n'
def write(self, *args, **kwargs):
pass # not available StdIn (but it can be expected to be in the stream interface)
def flush(self, *args, **kwargs):
pass # not available StdIn (but it can be expected to be in the stream interface)
def read(self, *args, **kwargs):
# in the interactive interpreter, a read and a readline are the same.
return self.readline()
def close(self, *args, **kwargs):
pass # expected in StdIn
def __iter__(self):
# BaseStdIn would not be considered as Iterable in Python 3 without explicit `__iter__` implementation
return self.original_stdin.__iter__()
def __getattr__(self, item):
# it's called if the attribute wasn't found
if hasattr(self.original_stdin, item):
return getattr(self.original_stdin, item)
raise AttributeError("%s has no attribute %s" % (self.original_stdin, item))
# =======================================================================================================================
# StdIn
# =======================================================================================================================
class StdIn(BaseStdIn):
'''
Object to be added to stdin (to emulate it as non-blocking while the next line arrives)
'''
def __init__(self, interpreter, host, client_port, original_stdin=sys.stdin):
BaseStdIn.__init__(self, original_stdin)
self.interpreter = interpreter
self.client_port = client_port
self.host = host
def readline(self, *args, **kwargs):
# Ok, callback into the client to get the new input
try:
server = xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))
requested_input = server.RequestInput()
if not requested_input:
return '\n' # Yes, a readline must return something (otherwise we can get an EOFError on the input() call).
return requested_input
except KeyboardInterrupt:
raise # Let KeyboardInterrupt go through -- #PyDev-816: Interrupting infinite loop in the Interactive Console
except:
return '\n'
def close(self, *args, **kwargs):
pass # expected in StdIn
#=======================================================================================================================
# DebugConsoleStdIn
#=======================================================================================================================
class DebugConsoleStdIn(BaseStdIn):
'''
Object to be added to stdin (to emulate it as non-blocking while the next line arrives)
'''
def __init__(self, dbg, original_stdin):
BaseStdIn.__init__(self, original_stdin)
self.debugger = dbg
def __pydev_run_command(self, is_started):
try:
cmd = self.debugger.cmd_factory.make_input_requested_message(is_started)
self.debugger.writer.add_command(cmd)
except Exception:
import traceback
traceback.print_exc()
return '\n'
def readline(self, *args, **kwargs):
# Notify Java side about input and call original function
self.__pydev_run_command(True)
result = self.original_stdin.readline(*args, **kwargs)
self.__pydev_run_command(False)
return result
class CodeFragment:
def __init__(self, text, is_single_line=True):
self.text = text
self.is_single_line = is_single_line
def append(self, code_fragment):
self.text = self.text + "\n" + code_fragment.text
if not code_fragment.is_single_line:
self.is_single_line = False
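# Editor's illustrative sketch (not part of pydev): CodeFragment simply
# accumulates source text, and a fragment stops being "single line" as soon
# as a multi-line fragment is appended to it.
def _demo_code_fragment():
    frag = CodeFragment("x = 1")
    frag.append(CodeFragment("y = 2\nz = 3", is_single_line=False))
    assert frag.text == "x = 1\ny = 2\nz = 3"
    assert frag.is_single_line is False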
# =======================================================================================================================
# BaseInterpreterInterface
# =======================================================================================================================
class BaseInterpreterInterface:
def __init__(self, mainThread, connect_status_queue=None):
self.mainThread = mainThread
self.interruptable = False
self.exec_queue = _queue.Queue(0)
self.buffer = None
self.banner_shown = False
self.connect_status_queue = connect_status_queue
self.mpl_modules_for_patching = {}
self.init_mpl_modules_for_patching()
def build_banner(self):
return 'print({0})\n'.format(repr(self.get_greeting_msg()))
def get_greeting_msg(self):
return 'PyDev console: starting.\n'
def init_mpl_modules_for_patching(self):
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot
self.mpl_modules_for_patching = {
"matplotlib": lambda: activate_matplotlib(self.enableGui),
"matplotlib.pyplot": activate_pyplot,
"pylab": activate_pylab
}
def need_more_for_code(self, source):
# PyDev-502: PyDev 3.9 F2 doesn't support backslash continuations
# Strangely even the IPython console is_complete said it was complete
# even with a continuation char at the end.
if source.endswith('\\'):
return True
if hasattr(self.interpreter, 'is_complete'):
return not self.interpreter.is_complete(source)
try:
code = self.interpreter.compile(source, '<input>', 'exec')
        except (OverflowError, SyntaxError, ValueError):
            # Case 1: the source is complete but contains a syntax (or
            # related) error -- run it so the user sees the error.
            return False
        if code is None:
            # Case 2: the source is a valid but incomplete statement -- more
            # input is required.
            return True
        # Case 3: the source is a complete, compilable statement.
        return False
def need_more(self, code_fragment):
if self.buffer is None:
self.buffer = code_fragment
else:
self.buffer.append(code_fragment)
return self.need_more_for_code(self.buffer.text)
def create_std_in(self, debugger=None, original_std_in=None):
if debugger is None:
return StdIn(self, self.host, self.client_port, original_stdin=original_std_in)
else:
return DebugConsoleStdIn(dbg=debugger, original_stdin=original_std_in)
def add_exec(self, code_fragment, debugger=None):
original_in = sys.stdin
try:
help = None
if 'pydoc' in sys.modules:
pydoc = sys.modules['pydoc'] # Don't import it if it still is not there.
if hasattr(pydoc, 'help'):
# You never know how will the API be changed, so, let's code defensively here
help = pydoc.help
if not hasattr(help, 'input'):
help = None
except:
# Just ignore any error here
pass
more = False
try:
sys.stdin = self.create_std_in(debugger, original_in)
try:
if help is not None:
# This will enable the help() function to work.
try:
try:
help.input = sys.stdin
except AttributeError:
help._input = sys.stdin
except:
help = None
if not self._input_error_printed:
self._input_error_printed = True
sys.stderr.write('\nError when trying to update pydoc.help.input\n')
sys.stderr.write('(help() may not work -- please report this as a bug in the pydev bugtracker).\n\n')
traceback.print_exc()
try:
self.start_exec()
if hasattr(self, 'debugger'):
from _pydevd_bundle import pydevd_tracing
pydevd_tracing.SetTrace(self.debugger.trace_dispatch)
more = self.do_add_exec(code_fragment)
if hasattr(self, 'debugger'):
from _pydevd_bundle import pydevd_tracing
pydevd_tracing.SetTrace(None)
self.finish_exec(more)
finally:
if help is not None:
try:
try:
help.input = original_in
except AttributeError:
help._input = original_in
except:
pass
finally:
sys.stdin = original_in
except SystemExit:
raise
except:
traceback.print_exc()
return more
def do_add_exec(self, codeFragment):
'''
Subclasses should override.
@return: more (True if more input is needed to complete the statement and False if the statement is complete).
'''
raise NotImplementedError()
def get_namespace(self):
'''
Subclasses should override.
@return: dict with namespace.
'''
raise NotImplementedError()
def __resolve_reference__(self, text):
"""
:type text: str
"""
obj = None
if '.' not in text:
try:
obj = self.get_namespace()[text]
except KeyError:
pass
if obj is None:
try:
obj = self.get_namespace()['__builtins__'][text]
except:
pass
if obj is None:
try:
obj = getattr(self.get_namespace()['__builtins__'], text, None)
except:
pass
else:
try:
last_dot = text.rindex('.')
parent_context = text[0:last_dot]
res = pydevd_vars.eval_in_context(parent_context, self.get_namespace(), self.get_namespace())
obj = getattr(res, text[last_dot + 1:])
except:
pass
return obj
def getDescription(self, text):
try:
obj = self.__resolve_reference__(text)
if obj is None:
return ''
return get_description(obj)
except:
return ''
def do_exec_code(self, code, is_single_line):
try:
code_fragment = CodeFragment(code, is_single_line)
more = self.need_more(code_fragment)
if not more:
code_fragment = self.buffer
self.buffer = None
self.exec_queue.put(code_fragment)
return more
except:
traceback.print_exc()
return False
def execLine(self, line):
if not self.banner_shown:
line = self.build_banner() + line
self.banner_shown = True
return self.do_exec_code(line, True)
def execMultipleLines(self, lines):
if not self.banner_shown:
lines = self.build_banner() + lines
self.banner_shown = True
if IS_JYTHON:
for line in lines.split('\n'):
self.do_exec_code(line, True)
else:
return self.do_exec_code(lines, False)
def interrupt(self):
self.buffer = None # Also clear the buffer when it's interrupted.
try:
if self.interruptable:
called = False
try:
# Fix for #PyDev-500: Console interrupt can't interrupt on sleep
import os
import signal
if os.name == 'posix':
# On Linux we can't interrupt 0 as in Windows because it's
# actually owned by a process -- on the good side, signals
# work much better on Linux!
os.kill(os.getpid(), signal.SIGINT)
called = True
elif os.name == 'nt':
# Stupid windows: sending a Ctrl+C to a process given its pid
# is absurdly difficult.
# There are utilities to make it work such as
# http://www.latenighthacking.com/projects/2003/sendSignal/
# but fortunately for us, it seems Python does allow a CTRL_C_EVENT
# for the current process in Windows if pid 0 is passed... if we needed
# to send a signal to another process the approach would be
# much more difficult.
# Still, note that CTRL_C_EVENT is only Python 2.7 onwards...
# Also, this doesn't seem to be documented anywhere!? (stumbled
# upon it by chance after digging quite a lot).
os.kill(0, signal.CTRL_C_EVENT)
called = True
except:
# Many things to go wrong (from CTRL_C_EVENT not being there
# to failing import signal)... if that's the case, ask for
# forgiveness and go on to the approach which will interrupt
# the main thread (but it'll only work when it's executing some Python
# code -- not on sleep() for instance).
pass
if not called:
if hasattr(thread, 'interrupt_main'): # Jython doesn't have it
thread.interrupt_main()
else:
self.mainThread._thread.interrupt() # Jython
self.finish_exec(False)
return True
except:
traceback.print_exc()
return False
def close(self):
sys.exit(0)
def start_exec(self):
self.interruptable = True
def get_server(self):
if getattr(self, 'host', None) is not None:
return xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))
else:
return None
server = property(get_server)
def ShowConsole(self):
server = self.get_server()
if server is not None:
server.ShowConsole()
def finish_exec(self, more):
self.interruptable = False
server = self.get_server()
if server is not None:
return server.NotifyFinished(more)
else:
return True
def getFrame(self):
xml = StringIO.StringIO()
hidden_ns = self.get_ipython_hidden_vars_dict()
xml.write("<xml>")
xml.write(pydevd_xml.frame_vars_to_xml(self.get_namespace(), hidden_ns))
xml.write("</xml>")
return xml.getvalue()
def getVariable(self, attributes):
xml = StringIO.StringIO()
xml.write("<xml>")
valDict = pydevd_vars.resolve_var(self.get_namespace(), attributes)
if valDict is None:
valDict = {}
keys = valDict.keys()
for k in keys:
val = valDict[k]
evaluate_full_value = pydevd_xml.should_evaluate_full_value(val)
xml.write(pydevd_vars.var_to_xml(val, k, evaluate_full_value=evaluate_full_value))
xml.write("</xml>")
return xml.getvalue()
def getArray(self, attr, roffset, coffset, rows, cols, format):
name = attr.split("\t")[-1]
array = pydevd_vars.eval_in_context(name, self.get_namespace(), self.get_namespace())
return pydevd_vars.table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format)
def evaluate(self, expression):
xml = StringIO.StringIO()
xml.write("<xml>")
result = pydevd_vars.eval_in_context(expression, self.get_namespace(), self.get_namespace())
xml.write(pydevd_vars.var_to_xml(result, expression))
xml.write("</xml>")
return xml.getvalue()
def loadFullValue(self, expressions):
xml = StringIO.StringIO()
xml.write("<xml>")
for expression in expressions:
result = pydevd_vars.eval_in_context(expression, self.get_namespace(), self.get_namespace())
xml.write(pydevd_vars.var_to_xml(result, expression, evaluate_full_value=True))
xml.write("</xml>")
return xml.getvalue()
def changeVariable(self, attr, value):
def do_change_variable():
Exec('%s=%s' % (attr, value), self.get_namespace(), self.get_namespace())
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_change_variable)
def _findFrame(self, thread_id, frame_id):
'''
Used to show console with variables connection.
Always return a frame where the locals map to our internal namespace.
'''
VIRTUAL_FRAME_ID = "1" # matches PyStackFrameConsole.java
VIRTUAL_CONSOLE_ID = "console_main" # matches PyThreadConsole.java
if thread_id == VIRTUAL_CONSOLE_ID and frame_id == VIRTUAL_FRAME_ID:
f = FakeFrame()
f.f_globals = {} # As globals=locals here, let's simply let it empty (and save a bit of network traffic).
f.f_locals = self.get_namespace()
return f
else:
return self.orig_find_frame(thread_id, frame_id)
def connectToDebugger(self, debuggerPort, debugger_options=None):
'''
Used to show console with variables connection.
Mainly, monkey-patches things in the debugger structure so that the debugger protocol works.
'''
if debugger_options is None:
debugger_options = {}
env_key = "PYDEVD_EXTRA_ENVS"
if env_key in debugger_options:
for (env_name, value) in dict_iter_items(debugger_options[env_key]):
os.environ[env_name] = value
del debugger_options[env_key]
def do_connect_to_debugger():
try:
# Try to import the packages needed to attach the debugger
import pydevd
from _pydev_imps._pydev_saved_modules import threading
except:
# This happens on Jython embedded in host eclipse
traceback.print_exc()
sys.stderr.write('pydevd is not available, cannot connect\n', )
from _pydev_bundle import pydev_localhost
threading.currentThread().__pydevd_id__ = "console_main"
self.orig_find_frame = pydevd_vars.find_frame
pydevd_vars.find_frame = self._findFrame
self.debugger = pydevd.PyDB()
try:
pydevd.apply_debugger_options(debugger_options)
self.debugger.connect(pydev_localhost.get_localhost(), debuggerPort)
self.debugger.prepare_to_run()
from _pydevd_bundle import pydevd_tracing
pydevd_tracing.SetTrace(None)
except:
traceback.print_exc()
sys.stderr.write('Failed to connect to target debugger.\n')
# Register to process commands when idle
self.debugrunning = False
try:
import pydevconsole
pydevconsole.set_debug_hook(self.debugger.process_internal_commands)
except:
traceback.print_exc()
sys.stderr.write('Version of Python does not support debuggable Interactive Console.\n')
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_connect_to_debugger)
return ('connect complete',)
def handshake(self):
if self.connect_status_queue is not None:
self.connect_status_queue.put(True)
return "PyCharm"
def get_connect_status_queue(self):
return self.connect_status_queue
def hello(self, input_str):
# Don't care what the input string is
return ("Hello eclipse",)
def enableGui(self, guiname):
''' Enable the GUI specified in guiname (see inputhook for list).
As with IPython, enabling multiple GUIs isn't an error, but
only the last one's main loop runs and it may not work
'''
def do_enable_gui():
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("PyDev console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_enable_gui)
def get_ipython_hidden_vars_dict(self):
return None
# =======================================================================================================================
# FakeFrame
# =======================================================================================================================
class FakeFrame:
'''
Used to show console with variables connection.
A class to be used as a mock of a frame.
'''
|
apache-2.0
|
NelisVerhoef/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
228
|
11221
|
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
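# Editor's illustrative sketch (not part of the test suite): the svmlight /
# libsvm text format exercised below stores one sparse sample per line as
# "<label> <index>:<value> ...". A labelled sparse row round-trips like this:
def _demo_svmlight_format():
    f = BytesIO(b("1 0:1.0 3:2.5\n2 4:-1.5\n"))
    X, y = load_svmlight_file(f, zero_based=True)
    assert X.shape == (2, 5)          # highest feature index is 4
    assert list(y) == [1.0, 2.0]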
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
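# Editor's addition: a round-trip sketch (not part of the original suite). Dumping a small,
# made-up dense matrix and loading it back should reproduce the data exactly.
def test_dump_load_roundtrip_small():
    X_small = np.array([[1.0, 0.0, 3.5],
                        [0.0, 2.0, 0.0]])
    y_small = np.array([1, -1])
    f = BytesIO()
    dump_svmlight_file(X_small, y_small, f, zero_based=True)
    f.seek(0)
    X_back, y_back = load_svmlight_file(f, n_features=3, zero_based=True)
    assert_array_almost_equal(X_small, X_back.toarray())
    assert_array_equal(y_small, y_back)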
|
bsd-3-clause
|
ryfeus/lambda-packs
|
Tensorflow_LightGBM_Scipy_nightly/source/scipy/interpolate/fitpack.py
|
21
|
25622
|
from __future__ import print_function, division, absolute_import
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from ._fitpack_impl import bisplrep, bisplev, dblint
from . import _fitpack_impl as _impl
from ._bsplines import BSpline
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-dimensional curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m/2.
        A value of nest=m+k+1 is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
BSpline
make_interp_spline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
    The number of coefficients in the `c` array is ``k+1`` less than the number
of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
the array of coefficients to have the same length as the array of knots.
These additional coefficients are ignored by evaluation routines, `splev`
and `BSpline`.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Generate a discretization of a limacon curve in the polar coordinates:
>>> phi = np.linspace(0, 2.*np.pi, 40)
>>> r = 0.5 + np.cos(phi) # polar coords
>>> x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian
And interpolate:
>>> from scipy.interpolate import splprep, splev
>>> tck, u = splprep([x, y], s=0)
>>> new_points = splev(u, tck)
Notice that (i) we force interpolation by using `s=0`,
(ii) the parameterization, ``u``, is generated automatically.
Now plot the result:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x, y, 'ro')
>>> ax.plot(new_points[0], new_points[1], 'r-')
>>> plt.show()
"""
res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
quiet)
return res
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
The degree of the spline fit. It is recommended to use cubic splines.
Even values of k should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored and used internally)
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
See Also
--------
UnivariateSpline, BivariateSpline
splprep, splev, sproot, spalde, splint
bisplrep, bisplev
BSpline
make_interp_spline
Notes
-----
See `splev` for evaluation of the spline and its derivatives. Uses the
FORTRAN routine ``curfit`` from FITPACK.
The user is responsible for assuring that the values of `x` are unique.
Otherwise, `splrep` will not return sensible results.
If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
This routine zero-pads the coefficients array ``c`` to have the same length
as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
`splprep`, which does not zero-pad the coefficients.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> spl = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, spl)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
return res
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : 3-tuple or a BSpline object
If a tuple, then it should be a sequence of length 3 returned by
`splrep` or `splprep` containing the knots, coefficients, and degree
of the spline. (Also see Notes.)
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in `x`. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in N-dimensional space.
Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using `BSpline` objects.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
BSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
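    Examples
    --------
    A minimal usage sketch (an editorial illustration; the numbers are only
    indicative): fit a cubic spline with `splrep`, then evaluate it and its
    first derivative on a finer grid.
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, splev
    >>> x = np.linspace(0, 2*np.pi, 10)
    >>> tck = splrep(x, np.sin(x))
    >>> xnew = np.linspace(0, 2*np.pi, 50)
    >>> ynew = splev(xnew, tck)          # spline values
    >>> yder = splev(xnew, tck, der=1)   # first derivative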
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
"not recommended. Use BSpline.__call__(x) instead.")
warnings.warn(mesg, DeprecationWarning)
# remap the out-of-bounds behavior
try:
extrapolate = {0: True, }[ext]
except KeyError:
raise ValueError("Extrapolation mode %s is not supported "
"by BSpline." % ext)
return tck(x, der, extrapolate=extrapolate)
else:
return _impl.splev(x, tck, der, ext)
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple or a BSpline instance
If a tuple, then it should be a sequence of length 3, containing the
vector of knots, the B-spline coefficients, and the degree of the
spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
(Only returned if `full_output` is non-zero)
Notes
-----
`splint` silently assumes that the spline function is zero outside the data
interval (`a`, `b`).
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
BSpline
References
----------
    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
"not recommended. Use BSpline.integrate() instead.")
warnings.warn(mesg, DeprecationWarning)
        if full_output != 0:
            mesg = ("full_output = %s is not supported. Proceeding as if "
                    "full_output = 0" % full_output)
            warnings.warn(mesg)
return tck.integrate(a, b, extrapolate=False)
else:
return _impl.splint(a, b, tck, full_output)
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple or a BSpline object
If a tuple, then it should be a sequence of length 3, containing the
vector of knots, the B-spline coefficients, and the degree of the
spline.
The number of knots must be >= 8, and the degree must be 3.
        The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
See also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
BSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
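    Examples
    --------
    For illustration (an editorial sketch): locate the zero crossings of an
    interpolating cubic spline of ``sin(x)``.
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, sproot
    >>> x = np.linspace(0, 3*np.pi, 50)
    >>> tck = splrep(x, np.sin(x))
    >>> roots = sproot(tck)   # roots near 0, pi, 2*pi and 3*pi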
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
"not recommended.")
warnings.warn(mesg, DeprecationWarning)
t, c, k = tck.tck
# _impl.sproot expects the interpolation axis to be last, so roll it.
# NB: This transpose is a no-op if c is 1D.
sh = tuple(range(c.ndim))
c = c.transpose(sh[1:] + (0,))
return _impl.sproot((t, c, k), mest)
else:
return _impl.sproot(tck, mest)
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple ``(t, c, k)``, containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
BSpline
References
----------
.. [1] C. de Boor: On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] M. G. Cox : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] P. Dierckx : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
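    Examples
    --------
    A minimal sketch (editorial illustration): all derivatives of an
    interpolating cubic spline of ``sin(x)`` at a single point; the result has
    ``k + 1 = 4`` entries (value, first, second and third derivative).
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, spalde
    >>> x = np.linspace(0, 2*np.pi, 50)
    >>> tck = splrep(x, np.sin(x))
    >>> derivs = spalde(1.0, tck)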
"""
if isinstance(tck, BSpline):
raise TypeError("spalde does not accept BSpline instances.")
else:
return _impl.spalde(x, tck)
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : a `BSpline` instance or a tuple
If tuple, then it is expected to be a tuple (t,c,k) containing
the vector of knots, the B-spline coefficients, and the degree of
the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
BSpline instance or a tuple
A new B-spline with knots t, coefficients c, and degree k.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
Notes
-----
Based on algorithms from [1]_ and [2]_.
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
# FITPACK expects the interpolation axis to be last, so roll it over
# NB: if c array is 1D, transposes are no-ops
sh = tuple(range(c.ndim))
c = c.transpose(sh[1:] + (0,))
t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)
# and roll the last axis back
c_ = np.asarray(c_)
c_ = c_.transpose((sh[-1],) + sh[:-1])
return BSpline(t_, c_, k_)
else:
return _impl.insert(x, tck, m, per)
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : BSpline instance or a tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
`BSpline` instance or tuple
Spline of order k2=k-n representing the derivative
of the input spline.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
BSpline
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
if isinstance(tck, BSpline):
return tck.derivative(n)
else:
return _impl.splder(tck, n)
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : BSpline instance or a tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
BSpline instance or a tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
See Also
--------
splder, splev, spalde
BSpline
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if isinstance(tck, BSpline):
return tck.antiderivative(n)
else:
return _impl.splantider(tck, n)
|
mit
|
schae234/PonyTools
|
scripts/Make670.py
|
1
|
3208
|
import sys
import MNEcTools.Tools as tools
import pandas as pd
# NOTE (editor): AxiomGenos, Fasta and annotation are referenced below but never imported
# in this script; they are presumably supplied by the surrounding ponytools package.
def main():
# Read in the The SNP List
TSL = pd.read_table("THE_SNP_LIST.tsv",sep=',')
INFO = pd.DataFrame(tools.read_vcf("/project/mccuelab/DataRepository/Variant/000_SNP/SNP_LIST.0.9.vcf"))
INFO = INFO.convert_objects(convert_numeric=True)
TSL = TSL.merge(right=INFO,how='left',left_on=['cust_chr','cust_pos'],right_on=['chrom','pos'])
BEST = TSL[TSL.BestProbeset == 1]
# get stats for each tissue type and conversion type
BEST.groupby(['tissue','ConversionType']).apply(len)
# Split out blood and hair for the best probes:
BLOOD = TSL[(TSL.BestProbeset == 1)&(TSL.tissue=='blood')]
HAIR = TSL[(TSL.BestProbeset == 1)&(TSL.tissue=='hair')]
# Compare hair and blood membership
blood_only = set(BLOOD[BLOOD.ConversionType == 'PolyHighResolution'].snpid).difference(set(HAIR[HAIR.ConversionType == 'PolyHighResolution'].snpid))
hair_only = set(HAIR[HAIR.ConversionType == 'PolyHighResolution'].snpid).difference(set(BLOOD[BLOOD.ConversionType == 'PolyHighResolution'].snpid))
    # Calculate HWE for hair and blood samples separately
BLOOD['HWE']=BLOOD[['n_AA','n_AB','n_BB']].apply(lambda x: tools.HW_chi(*x)[1],axis=1)
HAIR['HWE']=HAIR[['n_AA','n_AB','n_BB']].apply(lambda x: tools.HW_chi(*x)[1],axis=1)
# Make final list of:
# HighResPoly
# HighResMono
# NoMinorHomo with HWE > 10e-5
TagSet = pd.concat([
BLOOD[['probeset_id','cust_chr','cust_pos','cust_id','tissue','ConversionType']],
HAIR[['probeset_id','cust_chr','cust_pos','cust_id','tissue','ConversionType']]
]).drop_duplicates(cols='probeset_id')
# Sometimes blood and hair disagree on loci with two probes. Sort by Conversion Type
# then drop duplicates
ConversionRank = {
'PolyHighResolution' : 1,
'MonoHighResolution' : 2,
'NoMinorHom' : 3
}
TagSet['ProbeTie'] = TagSet['ConversionType'].apply(lambda x: ConversionRank[x] if x in ConversionRank else 4)
TagSet = TagSet.sort(['cust_id','ProbeTie']).drop_duplicates('cust_id')
    # Read in the genotype calls
blood = AxiomGenos.from_files(
"/project/mccuelab/rob/MNEc2M/Affy_MNEc2M/CallTwo/Samples.csv",
*["/project/mccuelab/rob/MNEc2M/Affy_MNEc2M/CallTwo/{}/{}/geno/AxiomGT1.calls.txt".format(x,y)
for x in ['A','B','C'] for y in ['blood']]
)
hair = AxiomGenos.from_files(
"/project/mccuelab/rob/MNEc2M/Affy_MNEc2M/CallTwo/Samples.csv",
*["/project/mccuelab/rob/MNEc2M/Affy_MNEc2M/CallTwo/{}/{}/geno/AxiomGT1.calls.txt".format(x,y)
for x in ['A','B','C'] for y in ['hair']]
)
geno=blood
geno.add_samples(hair.genos)
geno.fasta = Fasta.from_file('/project/mccuelab/DataRepository/Fastas/Equus_cab_nucl_wChrUn1_2.fasta')
annot = annotation.from_files(
"Axiom_MNEc2M_A_Annotation.r1.csv",
"Axiom_MNEc2M_B_Annotation.r1.csv",
"Axiom_MNEc2M_C_Annotation.r1.csv"
)
geno.annots = annot
geno.to_vcf('/project/mccuelab/rob/CompleteVCF/MNE/SNP.vcf')
if __name__ == '__main__':
    sys.exit(main())
|
mit
|
hds-lab/coding-ml
|
msgvis/apps/coding/utils.py
|
1
|
3911
|
import numpy
from sklearn import svm
from sklearn.externals import joblib
from django.db.models import Q
from operator import or_
# reduce is a builtin on Python 2 but must be imported from functools on Python 3
from functools import reduce
def get_formatted_X(dictionary, source, messages, feature_index_map, feature_num, use_tfidf=False, master_messages=[]):
message_num = len(messages) + len(master_messages)
source_list = ["system", source]
filter_ors = []
for source in source_list:
if source == "system":
filter_ors.append(("feature__source__isnull", True))
else:
filter_ors.append(("feature__source", source))
X = numpy.zeros((message_num, feature_num), dtype=numpy.float64)
for idx, msg in enumerate(messages):
message_feature_scores = msg.feature_scores.filter(feature__dictionary=dictionary, feature__valid=True)
message_feature_scores = message_feature_scores.filter(reduce(or_, [Q(x) for x in filter_ors])).all()
for feature_score in message_feature_scores:
index = feature_index_map[feature_score.feature_index]
X[idx, index] = feature_score.tfidf if use_tfidf else feature_score.count
    if len(master_messages) > 0:
        for idx, msg in enumerate(master_messages):
            message_feature_scores = msg.feature_scores.filter(feature__dictionary=dictionary, feature__valid=True)
            message_feature_scores = message_feature_scores.filter(reduce(or_, [Q(x) for x in filter_ors])).all()
            for feature_score in message_feature_scores:
                index = feature_index_map[feature_score.feature_index]
                # offset by len(messages): master messages fill the rows after the regular
                # messages instead of overwriting the first rows of X
                X[len(messages) + idx, index] = feature_score.tfidf if use_tfidf else feature_score.count
return X
def get_formatted_y(source, messages, master_messages=[]):
code_num = 0
code_map = {}
code_map_inverse = {}
y = []
for idx, msg in enumerate(messages):
code_id = msg.code_assignments.filter(source=source,
valid=True,
is_user_labeled=True).order_by("-last_updated").first().code.id
code_index = code_map.get(code_id)
if code_index is None:
code_index = code_num
code_map[code_id] = code_index
code_map_inverse[code_index] = code_id
code_num += 1
y.append(code_index)
if len(master_messages) > 0:
for idx, msg in enumerate(master_messages):
code_id = msg.code_assignments.filter(source__username="master",
valid=True,
is_user_labeled=True).order_by("-last_updated").first().code.id
code_index = code_map.get(code_id)
if code_index is None:
code_index = code_num
code_map[code_id] = code_index
code_map_inverse[code_index] = code_id
code_num += 1
y.append(code_index)
return y, code_map_inverse
def get_formatted_data(dictionary, source, messages, feature_index_map, feature_num, use_tfidf=False, master_messages=[]):
X = get_formatted_X(dictionary, source, messages, feature_index_map, feature_num, use_tfidf, master_messages)
y, code_map_inverse = get_formatted_y(source, messages, master_messages)
return X, y, code_map_inverse
def train_model(X, y, model_save_path=None):
lin_clf = svm.LinearSVC()
lin_clf.fit(X, y)
if model_save_path:
joblib.dump(lin_clf, model_save_path + "/model.pkl")
return lin_clf
def get_prediction(lin_model, X):
prediction = lin_model.predict(X)
if hasattr(lin_model, "predict_proba"):
prob = lin_model.predict_proba(X)[:, 1]
else: # use decision function
prob = lin_model.decision_function(X)
min = prob.min()
max = prob.max()
prob = \
(prob - min) / (max - min)
return prediction, prob
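# Editor's sketch (not part of the original module): the helpers above can be exercised on a toy
# dense matrix without touching any Django models. The data below is made up purely for illustration.
if __name__ == "__main__":
    X_demo = numpy.array([[1.0, 0.0], [0.9, 0.1], [0.0, 1.0], [0.1, 0.9]])
    y_demo = [0, 0, 1, 1]
    demo_model = train_model(X_demo, y_demo)  # no model_save_path, so nothing is persisted
    demo_labels, demo_scores = get_prediction(demo_model, X_demo)
    print(demo_labels, demo_scores)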
|
mit
|
dsullivan7/scikit-learn
|
sklearn/tests/test_kernel_ridge.py
|
342
|
3027
|
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
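# Editor's sketch (not in the original suite): with a precomputed kernel, predicting on new samples
# only requires the cross-kernel between the new points and the training points.
def test_kernel_ridge_precomputed_new_data():
    X_new = np.random.RandomState(1).randn(5, X.shape[1])
    K_train = pairwise_kernels(X, X, metric="linear")
    K_new = pairwise_kernels(X_new, X, metric="linear")
    pred = KernelRidge(kernel="linear").fit(X, y).predict(X_new)
    pred2 = KernelRidge(kernel="precomputed").fit(K_train, y).predict(K_new)
    assert_array_almost_equal(pred, pred2)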
|
bsd-3-clause
|
Reagankm/KnockKnock
|
venv/lib/python3.4/site-packages/matplotlib/tests/test_png.py
|
10
|
1259
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import glob
import os
import numpy as np
from matplotlib.testing.decorators import image_comparison
from matplotlib import pyplot as plt
import matplotlib.cm as cm
@image_comparison(baseline_images=['pngsuite'], extensions=['png'])
def test_pngsuite():
dirname = os.path.join(
os.path.dirname(__file__),
'baseline_images',
'pngsuite')
files = glob.glob(os.path.join(dirname, 'basn*.png'))
files.sort()
fig = plt.figure(figsize=(len(files), 2))
for i, fname in enumerate(files):
data = plt.imread(fname)
cmap = None # use default colormap
if data.ndim == 2:
# keep grayscale images gray
cmap = cm.gray
plt.imshow(data, extent=[i, i + 1, 0, 1], cmap=cmap)
plt.gca().patch.set_facecolor("#ddffff")
plt.gca().set_xlim(0, len(files))
def test_imread_png_uint16():
from matplotlib import _png
img = _png.read_png_int(os.path.join(os.path.dirname(__file__),
'baseline_images/test_png/uint16.png'))
assert (img.dtype == np.uint16)
assert np.sum(img.flatten()) == 134184960
|
gpl-2.0
|
GuessWhoSamFoo/pandas
|
pandas/core/dtypes/inference.py
|
1
|
10105
|
""" basic inference routines """
from numbers import Number
import re
import numpy as np
from pandas._libs import lib
from pandas.compat import (
PY2, Set, re_type, string_and_binary_types, string_types, text_type)
from pandas import compat
is_bool = lib.is_bool
is_integer = lib.is_integer
is_float = lib.is_float
is_complex = lib.is_complex
is_scalar = lib.is_scalar
is_decimal = lib.is_decimal
is_interval = lib.is_interval
def is_number(obj):
"""
Check if the object is a number.
    Returns True when the object is a number, and False if it is not.
Parameters
----------
obj : any type
The object to check if is a number.
Returns
-------
is_number : bool
Whether `obj` is a number or not.
See Also
--------
pandas.api.types.is_integer: Checks a subgroup of numbers.
Examples
--------
>>> pd.api.types.is_number(1)
True
>>> pd.api.types.is_number(7.15)
True
    Booleans are valid because they are a subclass of int.
>>> pd.api.types.is_number(False)
True
>>> pd.api.types.is_number("foo")
False
>>> pd.api.types.is_number("5")
False
"""
return isinstance(obj, (Number, np.number))
def is_string_like(obj):
"""
Check if the object is a string.
Parameters
----------
obj : The object to check
Examples
--------
>>> is_string_like("foo")
True
>>> is_string_like(1)
False
Returns
-------
is_str_like : bool
Whether `obj` is a string or not.
"""
return isinstance(obj, (text_type, string_types))
def _iterable_not_string(obj):
"""
Check if the object is an iterable but not a string.
Parameters
----------
obj : The object to check.
Returns
-------
is_iter_not_string : bool
Whether `obj` is a non-string iterable.
Examples
--------
>>> _iterable_not_string([1, 2, 3])
True
>>> _iterable_not_string("foo")
False
>>> _iterable_not_string(1)
False
"""
return (isinstance(obj, compat.Iterable) and
not isinstance(obj, string_types))
def is_iterator(obj):
"""
Check if the object is an iterator.
For example, lists are considered iterators
but not strings or datetime objects.
Parameters
----------
obj : The object to check
Returns
-------
is_iter : bool
Whether `obj` is an iterator.
Examples
--------
>>> is_iterator([1, 2, 3])
True
>>> is_iterator(datetime(2017, 1, 1))
False
>>> is_iterator("foo")
False
>>> is_iterator(1)
False
"""
if not hasattr(obj, '__iter__'):
return False
if PY2:
return hasattr(obj, 'next')
else:
# Python 3 generators have
# __next__ instead of next
return hasattr(obj, '__next__')
def is_file_like(obj):
"""
Check if the object is a file-like object.
For objects to be considered file-like, they must
be an iterator AND have either a `read` and/or `write`
method as an attribute.
Note: file-like objects must be iterable, but
iterable objects need not be file-like.
.. versionadded:: 0.20.0
Parameters
----------
obj : The object to check
Returns
-------
is_file_like : bool
Whether `obj` has file-like properties.
Examples
--------
    >>> import io
    >>> buffer = io.StringIO("data")
>>> is_file_like(buffer)
True
>>> is_file_like([1, 2, 3])
False
"""
if not (hasattr(obj, 'read') or hasattr(obj, 'write')):
return False
if not hasattr(obj, "__iter__"):
return False
return True
def is_re(obj):
"""
Check if the object is a regex pattern instance.
Parameters
----------
obj : The object to check
Returns
-------
is_regex : bool
Whether `obj` is a regex pattern.
Examples
--------
>>> is_re(re.compile(".*"))
True
>>> is_re("foo")
False
"""
return isinstance(obj, re_type)
def is_re_compilable(obj):
"""
Check if the object can be compiled into a regex pattern instance.
Parameters
----------
obj : The object to check
Returns
-------
is_regex_compilable : bool
Whether `obj` can be compiled as a regex pattern.
Examples
--------
>>> is_re_compilable(".*")
True
>>> is_re_compilable(1)
False
"""
try:
re.compile(obj)
except TypeError:
return False
else:
return True
def is_list_like(obj, allow_sets=True):
"""
Check if the object is list-like.
Objects that are considered list-like are for example Python
lists, tuples, sets, NumPy arrays, and Pandas Series.
Strings and datetime objects, however, are not considered list-like.
Parameters
----------
obj : The object to check
allow_sets : boolean, default True
If this parameter is False, sets will not be considered list-like
.. versionadded:: 0.24.0
Returns
-------
is_list_like : bool
Whether `obj` has list-like properties.
Examples
--------
>>> is_list_like([1, 2, 3])
True
>>> is_list_like({1, 2, 3})
True
>>> is_list_like(datetime(2017, 1, 1))
False
>>> is_list_like("foo")
False
>>> is_list_like(1)
False
>>> is_list_like(np.array([2]))
True
    >>> is_list_like(np.array(2))
False
"""
return (isinstance(obj, compat.Iterable)
# we do not count strings/unicode/bytes as list-like
and not isinstance(obj, string_and_binary_types)
# exclude zero-dimensional numpy arrays, effectively scalars
and not (isinstance(obj, np.ndarray) and obj.ndim == 0)
# exclude sets if allow_sets is False
and not (allow_sets is False and isinstance(obj, Set)))
def is_array_like(obj):
"""
Check if the object is array-like.
For an object to be considered array-like, it must be list-like and
have a `dtype` attribute.
Parameters
----------
obj : The object to check
Returns
-------
is_array_like : bool
Whether `obj` has array-like properties.
Examples
--------
>>> is_array_like(np.array([1, 2, 3]))
True
>>> is_array_like(pd.Series(["a", "b"]))
True
>>> is_array_like(pd.Index(["2016-01-01"]))
True
>>> is_array_like([1, 2, 3])
False
>>> is_array_like(("a", "b"))
False
"""
return is_list_like(obj) and hasattr(obj, "dtype")
def is_nested_list_like(obj):
"""
Check if the object is list-like, and that all of its elements
are also list-like.
.. versionadded:: 0.20.0
Parameters
----------
obj : The object to check
Returns
-------
is_list_like : bool
Whether `obj` has list-like properties.
Examples
--------
>>> is_nested_list_like([[1, 2, 3]])
True
>>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
True
>>> is_nested_list_like(["foo"])
False
>>> is_nested_list_like([])
False
>>> is_nested_list_like([[1, 2, 3], 1])
False
Notes
-----
This won't reliably detect whether a consumable iterator (e. g.
a generator) is a nested-list-like without consuming the iterator.
To avoid consuming it, we always return False if the outer container
doesn't define `__len__`.
See Also
--------
is_list_like
"""
return (is_list_like(obj) and hasattr(obj, '__len__') and
len(obj) > 0 and all(is_list_like(item) for item in obj))
def is_dict_like(obj):
"""
Check if the object is dict-like.
Parameters
----------
obj : The object to check
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
"""
for attr in ("__getitem__", "keys", "__contains__"):
if not hasattr(obj, attr):
return False
return True
def is_named_tuple(obj):
"""
Check if the object is a named tuple.
Parameters
----------
obj : The object to check
Returns
-------
is_named_tuple : bool
Whether `obj` is a named tuple.
Examples
--------
>>> Point = namedtuple("Point", ["x", "y"])
>>> p = Point(1, 2)
>>>
>>> is_named_tuple(p)
True
>>> is_named_tuple((1, 2))
False
"""
return isinstance(obj, tuple) and hasattr(obj, '_fields')
def is_hashable(obj):
"""Return True if hash(obj) will succeed, False otherwise.
Some types will pass a test against collections.Hashable but fail when they
are actually hashed with hash().
Distinguish between these and other types by trying the call to hash() and
seeing if they raise TypeError.
Examples
--------
>>> a = ([],)
>>> isinstance(a, collections.Hashable)
True
>>> is_hashable(a)
False
"""
# Unfortunately, we can't use isinstance(obj, collections.Hashable), which
# can be faster than calling hash. That is because numpy scalars on Python
# 3 fail this test.
# Reconsider this decision once this numpy bug is fixed:
# https://github.com/numpy/numpy/issues/5562
try:
hash(obj)
except TypeError:
return False
else:
return True
def is_sequence(obj):
"""
Check if the object is a sequence of objects.
String types are not included as sequences here.
Parameters
----------
obj : The object to check
Returns
-------
is_sequence : bool
Whether `obj` is a sequence of objects.
Examples
--------
>>> l = [1, 2, 3]
>>>
>>> is_sequence(l)
True
>>> is_sequence(iter(l))
False
"""
try:
iter(obj) # Can iterate over it.
len(obj) # Has a length associated with it.
return not isinstance(obj, string_and_binary_types)
except (TypeError, AttributeError):
return False
|
bsd-3-clause
|
Unidata/MetPy
|
v1.0/_downloads/f7fb7b4e11b9030fa7c1b08f65033c97/Station_Plot.py
|
4
|
4574
|
# Copyright (c) 2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Station Plot
============
Make a station plot, complete with sky cover and weather symbols.
The station plot itself is pretty straightforward, but there is a bit of code to perform the
data-wrangling (hopefully that situation will improve in the future). Certainly, if you have
existing point data in a format you can work with trivially, the station plot will be simple.
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from metpy.calc import reduce_point_density
from metpy.cbook import get_test_data
from metpy.io import metar
from metpy.plots import add_metpy_logo, current_weather, sky_cover, StationPlot
###########################################
# The setup
# ---------
#
# First read in the data. We use the metar reader because it simplifies a lot of tasks,
# like dealing with separating text and assembling a pandas dataframe
# https://thredds-test.unidata.ucar.edu/thredds/catalog/noaaport/text/metar/catalog.html
data = metar.parse_metar_file(get_test_data('metar_20190701_1200.txt', as_file_obj=False))
# Drop rows with missing winds
data = data.dropna(how='any', subset=['wind_direction', 'wind_speed'])
###########################################
# This sample data has *way* too many stations to plot all of them. The number
# of stations plotted will be reduced using `reduce_point_density`.
# Set up the map projection
proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=35,
standard_parallels=[35])
# Use the Cartopy map projection to transform station locations to the map and
# then refine the number of stations plotted by setting a 300km radius
point_locs = proj.transform_points(ccrs.PlateCarree(), data['longitude'].values,
data['latitude'].values)
data = data[reduce_point_density(point_locs, 300000.)]
###########################################
# The payoff
# ----------
# Change the DPI of the resulting figure. Higher DPI drastically improves the
# look of the text rendering.
plt.rcParams['savefig.dpi'] = 255
# Create the figure and an axes set to the projection.
fig = plt.figure(figsize=(20, 10))
add_metpy_logo(fig, 1100, 300, size='large')
ax = fig.add_subplot(1, 1, 1, projection=proj)
# Add some various map elements to the plot to make it recognizable.
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.LAKES)
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.STATES)
ax.add_feature(cfeature.BORDERS)
# Set plot bounds
ax.set_extent((-118, -73, 23, 50))
#
# Here's the actual station plot
#
# Start the station plot by specifying the axes to draw on, as well as the
# lon/lat of the stations (with transform). We also set the fontsize to 12 pt.
stationplot = StationPlot(ax, data['longitude'].values, data['latitude'].values,
clip_on=True, transform=ccrs.PlateCarree(), fontsize=12)
# Plot the temperature and dew point to the upper and lower left, respectively, of
# the center point. Each one uses a different color.
stationplot.plot_parameter('NW', data['air_temperature'].values, color='red')
stationplot.plot_parameter('SW', data['dew_point_temperature'].values,
color='darkgreen')
# A more complex example uses a custom formatter to control how the sea-level pressure
# values are plotted. This uses the standard trailing 3-digits of the pressure value
# in tenths of millibars.
stationplot.plot_parameter('NE', data['air_pressure_at_sea_level'].values,
formatter=lambda v: format(10 * v, '.0f')[-3:])
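# (Editor's illustration, not in the original script: assuming the pressures are in hPa as noted
# above, a value of 1013.2 becomes format(10132.0, '.0f') -> '10132', and its trailing three
# digits, '132', are the conventional coded form.)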
# Plot the cloud cover symbols in the center location. This uses the codes made above and
# uses the `sky_cover` mapper to convert these values to font codes for the
# weather symbol font.
stationplot.plot_symbol('C', data['cloud_coverage'].values, sky_cover)
# Do the same again, but plot the current weather to the left of center, using the
# `current_weather` mapper to convert symbols to the right glyphs.
stationplot.plot_symbol('W', data['present_weather'].values, current_weather)
# Add wind barbs
stationplot.plot_barb(data['eastward_wind'].values, data['northward_wind'].values)
# Also plot the actual text of the station id. Instead of cardinal directions,
# plot further out by specifying a location of 2 increments in x and 0 in y.
stationplot.plot_text((2, 0), data['station_id'].values)
plt.show()
|
bsd-3-clause
|
idaholab/raven
|
framework/CodeInterfaceBaseClass.py
|
1
|
12456
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 28, 2015
@author: alfoa
"""
from __future__ import division, print_function, unicode_literals, absolute_import
#External Modules------------------------------------------------------------------------------------
import abc
import os
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from utils import utils
import CsvLoader
#Internal Modules End--------------------------------------------------------------------------------
class CodeInterfaceBase(utils.metaclass_insert(abc.ABCMeta,object)):
"""
Code Interface base class. This class should be the base class for all the code interfaces.
In this way some methods are forced to be implemented and some automatic checking features
are available (checking of the inputs if no executable is available), etc.
NOTE: As said, this class SHOULD be the base class of the code interfaces. However, the developer
          of a new code interface may decide not to inherit from this class if they do not want
to exploit the automatic checking of the code interface's functionalities
"""
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
self.inputExtensions = [] # list of input extensions
self._runOnShell = True # True if the specified command by the code interfaces will be executed through shell.
self._ravenWorkingDir = None # location of RAVEN's main working directory
self._csvLoadUtil = 'pandas' # utility to use to load CSVs
self.printFailedRuns = True # whether to print failed runs to the screen
self._writeCSV = False # write CSV even if the data can be returned directly to raven (e.g. if the user requests them)
def setRunOnShell(self, shell=True):
"""
      Method used to set the execution of the code command through shell if shell=True
@ In, shell, Boolean, True if the users want to execute their code through shell
@ Out, None
"""
self._runOnShell = shell
def getRunOnShell(self):
"""
Method to return the status of self._runOnShell
@ In, None
      @ Out, runOnShell, bool, True if the code command will be executed through shell
"""
return self._runOnShell
def getIfWriteCsv(self):
"""
Returns self._writeCSV. True if a CSV is requested by the user even if
the code interface returns the data to RAVEN directly
@ In, None
@ Out, getIfWriteCsv, bool, should we write the csv?
"""
return self._writeCSV
def getCsvLoadUtil(self):
"""
Returns the string representation of the CSV loading utility to use
@ In, None
@ Out, getCsvLoadUtil, str, name of utility to use
"""
# default to pandas, overwrite to 'numpy' if all of the following:
# - all entries are guaranteed to be floats
# - results CSV have a large number of headers (>1000)
return self._csvLoadUtil
def setCsvLoadUtil(self, util):
"""
      Sets the string representation of the CSV loading utility to use
      @ In, util, str, name of utility to use
      @ Out, None
"""
ok = CsvLoader.CsvLoader.acceptableUtils
if util not in ok:
raise TypeError(f'Unrecognized CSV loading utility: "{util}"! Expected one of: {ok}')
self._csvLoadUtil = util
def genCommand(self, inputFiles, executable, flags=None, fileArgs=None, preExec=None):
"""
This method is used to retrieve the command (in tuple format) needed to launch the Code.
This method checks a boolean environment variable called 'RAVENinterfaceCheck':
if true, the subcodeCommand is going to be overwritten with an empty string. In this way we can check the functionality of the interface without having an executable.
See Driver.py to understand how this Env variable is set
@ In, inputFiles, list, List of input files (length of the list depends on the number of inputs have been added in the Step is running this code)
@ In, executable, string, executable name with absolute path (e.g. /home/path_to_executable/code.exe)
@ In, flags, dict, optional, dictionary containing the command-line flags the user can specify in the input (e.g. under the node < Code >< clargstype =0 input0arg =0 i0extension =0 .inp0/ >< /Code >)
@ In, fileArgs, dict, optional, a dictionary containing the auxiliary input file variables the user can specify in the input (e.g. under the node < Code >< fileargstype =0 input0arg =0 aux0extension =0 .aux0/ >< /Code >)
@ In, preExec, string, optional, a string the command that needs to be pre-executed before the actual command here defined
@ Out, returnCommand, tuple, tuple containing the generated command. returnCommand[0] is the command to run the code (string), returnCommand[1] is the name of the output root
"""
subcodeCommand,outputfileroot = self.generateCommand(inputFiles,executable,clargs=flags,fargs=fileArgs,preExec=preExec)
if utils.stringIsTrue(os.environ.get('RAVENinterfaceCheck','False')):
return [('parallel','echo')],outputfileroot
returnCommand = subcodeCommand,outputfileroot
return returnCommand
def readMoreXML(self, xmlNode, ravenWorkingDir):
"""
Function to read the portion of the xml input that belongs to this class and
initialize some members based on inputs.
@ In, xmlNode, xml.etree.ElementTree.Element, Xml element node
@ In, ravenWorkingDir, str, location of RAVEN's working directory
@ Out, None
"""
self._ravenWorkingDir = ravenWorkingDir
self._readMoreXML(xmlNode)
# read global options
# should we print CSV even if the data can be directly returned to RAVEN?
csvLog = xmlNode.find("csv")
self._writeCSV = utils.stringIsTrue(csvLog.text if csvLog is not None else "False")
def _readMoreXML(self, xmlNode):
"""
Function to read the portion of the xml input that belongs to this specialized class and
initialize some members based on inputs. This can be overloaded in specialized code interface in order
to read specific flags
@ In, xmlNode, xml.etree.ElementTree.Element, Xml element node
@ Out, None
"""
pass
@abc.abstractmethod
def generateCommand(self, inputFiles, executable, clargs=None, fargs=None, preExec=None):
"""
This method is used to retrieve the command (in tuple format) needed to launch the Code.
      @ In, inputFiles, list, List of input files (the length of the list depends on the number of inputs that have been added in the Step running this code)
@ In, executable, string, executable name with absolute path (e.g. /home/path_to_executable/code.exe)
@ In, clargs, dict, optional, dictionary containing the command-line flags the user can specify in the input (e.g. under the node < Code >< clargstype =0 input0arg =0 i0extension =0 .inp0/ >< /Code >)
      @ In, fargs, dict, optional, a dictionary containing the auxiliary input file variables the user can specify in the input (e.g. under the node < Code >< fileargstype =0 input0arg =0 aux0extension =0 .aux0/ >< /Code >)
      @ In, preExec, string, optional, a string containing the command that needs to be pre-executed before the actual command defined here
@ Out, returnCommand, tuple, tuple containing the generated command. returnCommand[0] is the command to run the code (string), returnCommand[1] is the name of the output root
"""
return
@abc.abstractmethod
def createNewInput(self, currentInputFiles, oriInputFiles, samplerType, **Kwargs):
"""
This method is used to generate an input based on the information passed in.
      @ In, currentInputFiles, list, list of current input files (input files from the last call of this method)
@ In, oriInputFiles, list, list of the original input files
@ In, samplerType, string, Sampler type (e.g. MonteCarlo, Adaptive, etc. see manual Samplers section)
@ In, Kwargs, dictionary, kwarded dictionary of parameters. In this dictionary there is another dictionary called "SampledVars"
where RAVEN stores the variables that got sampled (e.g. Kwargs['SampledVars'] => {'var1':10,'var2':40})
      @ Out, newInputFiles, list, list of the new input files (modified and not)
"""
pass
####################
####### OPTIONAL METHODS #######
####################
def getInputExtension(self):
"""
      This method returns the extensions the code interface accepts for the input file (the main one)
      @ In, None
      @ Out, getInputExtension, tuple, tuple of strings containing accepted input extensions (e.g. (".i", ".inp"))
"""
return tuple(self.inputExtensions)
def setInputExtension(self,exts):
"""
      This method sets the list of extensions the code interface accepts for the input files
      @ In, exts, list, list or other array containing accepted input extensions (e.g. [".i", ".inp"])
@ Out, None
"""
self.inputExtensions = []
self.addInputExtension(exts)
def addInputExtension(self,exts):
"""
      This method adds to the list of extensions the code interface accepts for the input files
      @ In, exts, list, list or other array containing accepted input extensions (e.g. [".i", ".inp"])
@ Out, None
"""
for e in exts:
self.inputExtensions.append(e)
def addDefaultExtension(self):
"""
This method sets a list of default extensions a specific code interface accepts for the input files.
This method should be overwritten if these are not acceptable defaults.
@ In, None
@ Out, None
"""
self.addInputExtension(['i','inp','in'])
def initialize(self, runInfo, oriInputFiles):
"""
Method to initialize the run of a new step
@ In, runInfo, dict, dictionary of the info in the <RunInfo> XML block
@ In, oriInputFiles, list, list of the original input files
@ Out, None
"""
# store working dir for future needs
self._ravenWorkingDir = runInfo['WorkingDir']
def finalizeCodeOutput(self, command, output, workingDir):
"""
      This method is called by the RAVEN code at the end of each run (if the method is present).
      It can be used for codes that do not create CSV files, to convert whatever output format they produce into a CSV
@ In, command, string, the command used to run the just ended job
@ In, output, string, the Output name root
@ In, workingDir, string, current working dir
@ Out, output, string or dict, optional, if present and string:
in case the root of the output file gets changed in this method (and a CSV is produced);
if present and dict:
in case the output of the code is directly stored in a dictionary and can be directly used
without the need that RAVEN reads an additional CSV
"""
return output
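  # Minimal override sketch (hypothetical; it assumes the code writes a simple
  # comma-separated text file named <output>.dat): a specialized interface could
  # parse that file and return a dict directly, so RAVEN does not need an extra CSV.
  #   def finalizeCodeOutput(self, command, output, workingDir):
  #     results = {}
  #     with open(os.path.join(workingDir, output + '.dat')) as f:
  #       for line in f:
  #         var, val = line.split(',')
  #         results[var.strip()] = [float(val)]   # one list of values per variable
  #     return results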
def checkForOutputFailure(self, output, workingDir):
"""
This method is called by RAVEN at the end of each run if the return code is == 0.
      This method needs to be implemented by codes that, when a run fails, still return a return code of 0.
      This can happen in codes that record the failure of the job (e.g. not converged, etc.) as normal termination (returncode == 0).
      This method can be used, for example, to parse the output file looking for a special keyword that indicates that a particular job failed
(e.g. in RELAP5 would be the keyword "********")
@ In, output, string, the Output name root
@ In, workingDir, string, current working dir
@ Out, failure, bool, True if the job is failed, False otherwise
"""
failure = False
return failure
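  # Override sketch (hypothetical, following the RELAP5 example given in the
  # docstring above; the ".o" output extension is an assumption): scan the output
  # file for a failure keyword and report the run as failed even though the
  # return code was 0.
  #   def checkForOutputFailure(self, output, workingDir):
  #     with open(os.path.join(workingDir, output + '.o')) as f:
  #       return any('********' in line for line in f)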
|
apache-2.0
|
xodus7/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/kmeans_test.py
|
39
|
20233
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators import kmeans as kmeans_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
steps = 10 * self.num_points // self.batch_size
kmeans.fit(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test for use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=learn.RunConfig(tf_random_seed=14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.fit(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertNear(self.true_score, score, self.true_score * 0.01)
def _infer_helper(self, kmeans, clusters, num_points):
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
# Test predict
assignments = list(
kmeans.predict_cluster_idx(input_fn=self.input_fn(
batch_size=num_points, points=points, num_epochs=1)))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(
input_fn=lambda: (constant_op.constant(points), None))
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1,
keepdims=True) - 2 * np.dot(points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.clusters()
# Run inference on small datasets.
self._infer_helper(kmeans, clusters, num_points=10)
self._infer_helper(kmeans, clusters, num_points=1)
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.fit(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0, keepdims=True))[
0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0, keepdims=True))[
0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.fit(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.clusters())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = self.kmeans.transform(input_fn=self.input_fn(
batch_size=self.num_points))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
assignments = list(
self.kmeans.predict_cluster_idx(input_fn=self.input_fn(
num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
    # Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[4:, :], axis=0, keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.clusters())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_idx(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=run_config.RunConfig(tf_random_seed=3))
tf_kmeans.fit(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.clusters()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None),
steps=1))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
  # Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
iut-ibk/CityDrain3
|
bench/plot-compare-speedup.py
|
5
|
1333
|
#!/usr/bin/env python
import matplotlib as mpl
mpl.use('cairo')
from pylab import *
import glob
import sys
import os
import re
inch=2.54
if len(sys.argv) < 3:
print "usage plot.py model pid"
exit(-1)
format="pdf"
if len(sys.argv) > 3:
format=sys.argv[3]
rc('font', size=8)
rc('figure.subplot', left=0.12, right=0.94, top=0.97, bottom=0.21)
model = sys.argv[1]
pid = sys.argv[2]
shared = csv2rec('time/%s-shared-%s-time-avg.txt' % (model, pid), delimiter='\t', names=['nthreads', 'runtime'])
shared.speedup = map(lambda r: shared.runtime[0] / r, shared.runtime)
nonshared = csv2rec('time/%s-nonshared-%s-time-avg.txt' % (model, pid), delimiter='\t', names=['nthreads', 'runtime'])
nonshared.speedup = map(lambda r: nonshared.runtime[0] / r, nonshared.runtime)
f=figure(figsize=(8/inch, 5.1/inch))
plot(range(len(shared.nthreads)), range(len(shared.nthreads)))
lines=[]
line=plot(shared.nthreads, shared.speedup)
line[0].set_linestyle('-')
lines.append(line)
line=plot(nonshared.nthreads, nonshared.speedup)
line[0].set_linestyle('--')
lines.append(line)
xlabel('number of threads')#, va='bottom')
ylabel('speedup')#, ha='left')
l=legend(lines, ["shared", "nonshared"], 'best')
l.draw_frame(False)
savefig('imgs/%s-+%s-comp-speedup.%s' % (model, pid, format), format=format, dpi=400)#, bbox_inches="tight")
#show()
|
gpl-2.0
|
ArteliaTelemac/PostTelemac
|
PostTelemac/meshlayerlibs/tri/trifinder.py
|
1
|
3419
|
from __future__ import print_function
"""
from matplotlib.tri import Triangulation
import matplotlib._tri as _tri
"""
from .triangulation import Triangulation
from . import _tri
class TriFinder(object):
"""
Abstract base class for classes used to find the triangles of a
Triangulation in which (x,y) points lie.
Rather than instantiate an object of a class derived from TriFinder, it is
usually better to use the function
:func:`matplotlib.tri.Triangulation.get_trifinder`.
Derived classes implement __call__(x,y) where x,y are array_like point
coordinates of the same shape.
"""
def __init__(self, triangulation):
if not isinstance(triangulation, Triangulation):
raise ValueError('Expected a Triangulation object')
self._triangulation = triangulation
class TrapezoidMapTriFinder(TriFinder):
"""
:class:`~matplotlib.tri.TriFinder` class implemented using the trapezoid
map algorithm from the book "Computational Geometry, Algorithms and
Applications", second edition, by M. de Berg, M. van Kreveld, M. Overmars
and O. Schwarzkopf.
The triangulation must be valid, i.e. it must not have duplicate points,
triangles formed from colinear points, or overlapping triangles. The
algorithm has some tolerance to triangles formed from colinear points, but
this should not be relied upon.
"""
def __init__(self, triangulation):
TriFinder.__init__(self, triangulation)
self._cpp_trifinder = _tri.TrapezoidMapTriFinder(
triangulation.get_cpp_triangulation())
"""
self._cpp_trifinder = TrapezoidMapTriFinder(
triangulation.get_cpp_triangulation())
"""
self._initialize()
def __call__(self, x, y):
"""
Return an array containing the indices of the triangles in which the
specified x,y points lie, or -1 for points that do not lie within a
triangle.
*x*, *y* are array_like x and y coordinates of the same shape and any
number of dimensions.
        Returns integer array with the same shape as *x* and *y*.
"""
# C++ checks arguments are OK.
return self._cpp_trifinder.find_many(x, y)
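    # Hedged usage sketch (not from the original file; `triangulation`, `x` and
    # `y` are assumed to be defined by the caller). As the class docstring
    # suggests, a finder is normally obtained from a Triangulation and then
    # called on point coordinates:
    #   finder = triangulation.get_trifinder()
    #   tri_indices = finder(x, y)   # -1 for points outside the triangulation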
def _get_tree_stats(self):
"""
Return a python list containing the statistics about the node tree:
0: number of nodes (tree size)
1: number of unique nodes
2: number of trapezoids (tree leaf nodes)
3: number of unique trapezoids
4: maximum parent count (max number of times a node is repeated in
tree)
5: maximum depth of tree (one more than the maximum number of
comparisons needed to search through the tree)
6: mean of all trapezoid depths (one more than the average number
of comparisons needed to search through the tree)
"""
return self._cpp_trifinder.get_tree_stats()
def _initialize(self):
"""
Initialize the underlying C++ object. Can be called multiple times if,
for example, the triangulation is modified.
"""
self._cpp_trifinder.initialize()
def _print_tree(self):
"""
Print a text representation of the node tree, which is useful for
debugging purposes.
"""
self._cpp_trifinder.print_tree()
|
gpl-3.0
|
yarikoptic/pystatsmodels
|
statsmodels/datasets/utils.py
|
2
|
9940
|
import sys
import shutil
import pickle
from os import environ
from os import makedirs
from os.path import basename
from os.path import expanduser
from os.path import exists
from os.path import join
from StringIO import StringIO
import time
from urllib2 import urlopen, HTTPError
import numpy as np
from numpy import genfromtxt, array
from pandas import read_csv
class Dataset(dict):
def __init__(self, **kw):
# define some default attributes, so pylint can find them
self.endog = None
self.exog = None
self.data = None
self.names = None
dict.__init__(self,kw)
self.__dict__ = self
# Some datasets have string variables. If you want a raw_data
# attribute you must create this in the dataset's load function.
try: # some datasets have string variables
self.raw_data = self.data.view((float, len(self.names)))
except:
pass
def __repr__(self):
return str(self.__class__)
def process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None):
names = list(data.dtype.names)
if isinstance(endog_idx, int):
endog = array(data[names[endog_idx]], dtype=dtype)
endog_name = names[endog_idx]
endog_idx = [endog_idx]
else:
endog_name = [names[i] for i in endog_idx]
if stack:
endog = np.column_stack(data[field] for field in endog_name)
else:
endog = data[endog_name]
if exog_idx is None:
exog_name = [names[i] for i in xrange(len(names))
if i not in endog_idx]
else:
exog_name = [names[i] for i in exog_idx]
if stack:
exog = np.column_stack(data[field] for field in exog_name)
else:
exog = data[exog_name]
if dtype:
endog = endog.astype(dtype)
exog = exog.astype(dtype)
dataset = Dataset(data=data, names=names, endog=endog, exog=exog,
endog_name=endog_name, exog_name=exog_name)
return dataset
def process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=None,
index_idx=None):
from pandas import DataFrame
data = DataFrame(data, dtype=dtype)
names = data.columns
if isinstance(endog_idx, int):
endog_name = names[endog_idx]
endog = data[endog_name]
if exog_idx is None:
exog = data.drop([endog_name], axis=1)
else:
exog = data.filter(names[exog_idx])
else:
endog = data.ix[:, endog_idx]
endog_name = list(endog.columns)
if exog_idx is None:
exog = data.drop(endog_name, axis=1)
elif isinstance(exog_idx, int):
exog = data.filter([names[exog_idx]])
else:
exog = data.filter(names[exog_idx])
if index_idx is not None: #NOTE: will have to be improved for dates
from pandas import Index
endog.index = Index(data.ix[:, index_idx])
exog.index = Index(data.ix[:, index_idx])
data = data.set_index(names[index_idx])
exog_name = list(exog.columns)
dataset = Dataset(data=data, names=list(names), endog=endog, exog=exog,
endog_name=endog_name, exog_name=exog_name)
return dataset
def _maybe_reset_index(data):
"""
All the Rdatasets have the integer row.labels from R if there is no
real index. Strip this for a zero-based index
"""
from pandas import Index
if data.index.equals(Index(range(1,len(data)+1))):
data = data.reset_index(drop=True)
return data
def _get_cache(cache):
if cache is False:
# do not do any caching or load from cache
cache = None
elif cache is True: # use default dir for cache
cache = get_data_home(None)
else:
cache = get_data_home(cache)
return cache
def _cache_it(data, cache_path):
if sys.version_info[0] >= 3:
# for some reason encode("zip") won't work for me in Python 3?
import zlib
# use protocol 2 so can open with python 2.x if cached in 3.x
open(cache_path, "wb").write(zlib.compress(pickle.dumps(data,
protocol=2)))
else:
open(cache_path, "wb").write(pickle.dumps(data).encode("zip"))
def _open_cache(cache_path):
if sys.version_info[0] >= 3:
#NOTE: don't know why but decode('zip') doesn't work on my
# Python 3 build
import zlib
data = zlib.decompress(open(cache_path, 'rb').read())
# return as bytes object encoded in utf-8 for cross-compat of cached
data = pickle.loads(data).encode('utf-8')
else:
data = open(cache_path, 'rb').read().decode('zip')
data = pickle.loads(data)
return data
def _urlopen_cached(url, cache):
"""
Tries to load data from cache location otherwise downloads it. If it
downloads the data and cache is not None then it will put the downloaded
data in the cache path.
"""
from_cache = False
if cache is not None:
cache_path = join(cache,
url.split("://")[-1].replace('/', ',') +".zip")
try:
data = _open_cache(cache_path)
from_cache = True
except:
pass
# not using the cache or didn't find it in cache
if not from_cache:
data = urlopen(url).read()
if cache is not None: # then put it in the cache
_cache_it(data, cache_path)
return data, from_cache
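# Illustrative note (derived from the cache-path construction above; the URL is
# hypothetical): a URL such as "https://example.com/csv/datasets/iris.csv" would
# be cached as <cache>/example.com,csv,datasets,iris.csv.zip -- the scheme is
# stripped and "/" is replaced by "," before ".zip" is appended.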
def _get_data(base_url, dataname, cache, extension="csv"):
url = base_url + (dataname + ".%s") % extension
try:
data, from_cache = _urlopen_cached(url, cache)
except HTTPError, err:
if '404' in str(err):
raise ValueError("Dataset %s was not found." % dataname)
else:
raise err
#Python 3, always decode as unicode
if sys.version[0] == '3': # pragma: no cover
data = data.decode('utf-8', errors='strict')
return StringIO(data), from_cache
def _get_dataset_meta(dataname, package, cache):
# get the index, you'll probably want this cached because you have
# to download info about all the data to get info about any of the data...
index_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/master/"
"datasets.csv")
data, _ = _urlopen_cached(index_url, cache)
#Python 3
if sys.version[0] == '3': # pragma: no cover
data = data.decode('ascii', errors='strict')
index = read_csv(StringIO(data))
idx = np.logical_and(index.Item == dataname, index.Package == package)
dataset_meta = index.ix[idx]
return dataset_meta["Title"].item()
def get_rdataset(dataname, package="datasets", cache=False):
"""download and return R dataset
Parameters
----------
dataname : str
The name of the dataset you want to download
package : str
The package in which the dataset is found. The default is the core
'datasets' package.
cache : bool or str
If True, will download this data into the STATSMODELS_DATA folder.
The default location is a folder called statsmodels_data in the
user home folder. Otherwise, you can specify a path to a folder to
use for caching the data. If False, the data will not be cached.
Returns
-------
dataset : Dataset instance
A `statsmodels.data.utils.Dataset` instance. This objects has
attributes::
* data - A pandas DataFrame containing the data
* title - The dataset title
* package - The package from which the data came
        * from_cache - Whether or not the data was retrieved from the cache
* __doc__ - The verbatim R documentation.
Notes
-----
    If the R dataset has an integer index, it is reset to be zero-based.
Otherwise the index is preserved. The caching facilities are dumb. That
is, no download dates, e-tags, or otherwise identifying information
is checked to see if the data should be downloaded again or not. If the
dataset is in the cache, it's used.
"""
#NOTE: use raw github bc html site might not be most up to date
data_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/"
"master/csv/"+package+"/")
docs_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/"
"master/doc/"+package+"/rst/")
cache = _get_cache(cache)
data, from_cache = _get_data(data_base_url, dataname, cache)
data = read_csv(data, index_col=0)
data = _maybe_reset_index(data)
title = _get_dataset_meta(dataname, package, cache)
doc, _ = _get_data(docs_base_url, dataname, cache, "rst")
return Dataset(data=data, __doc__=doc.read(), package=package, title=title,
from_cache=from_cache)
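# Hedged usage sketch (not part of the original module); the dataset and package
# names are illustrative and assume the Rdatasets index contains them:
#   duncan = get_rdataset("Duncan", package="car", cache=True)
#   duncan.title        # the dataset title from the Rdatasets index
#   duncan.data.head()  # a pandas DataFrame with a zero-based index
#   duncan.__doc__      # the verbatim R documentation (rst)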
### The functions below were taken from sklearn
def get_data_home(data_home=None):
"""Return the path of the statsmodels data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'statsmodels_data'
in the user home folder.
Alternatively, it can be set by the 'STATSMODELS_DATA' environment
    variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('STATSMODELS_DATA',
join('~', 'statsmodels_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
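# Hedged example (behavior as described in the docstring above; the path is
# illustrative): the cache location can be redirected before any download, e.g.
#   environ['STATSMODELS_DATA'] = '/tmp/sm_cache'
#   get_data_home()   # -> '/tmp/sm_cache', created if it does not already exist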
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
|
bsd-3-clause
|
jayansenaratne/aston
|
setup.py
|
3
|
6486
|
#!/usr/bin/env python
# Copyright 2011-2014 Roderick Bovee
#
# This file is part of Aston.
#
# Aston is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Aston is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Aston. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup
from glob import glob
import matplotlib
import sys
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
options = {
'name': 'Aston',
'version': '0.7.0',
'description': 'Mass/UV Spectral Analysis Program',
'author': 'Roderick Bovee',
'author_email': '[email protected]',
'url': 'http://code.google.com/p/aston',
'license': 'GPLv3',
'platforms': ['Any'],
'classifiers': [
'Development Status :: 4 - Beta',
'Environment :: X11 Applications :: Qt',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: Chemistry'
],
'long_description': read('README.rst'),
'packages': ['aston', 'aston.calibrations', 'aston.database', \
'aston.peaks', 'aston.qtgui', 'aston.spectra', \
'aston.test', 'aston.trace', 'aston.tracefile'],
'scripts': ['astonx.py'],
'data_files': matplotlib.get_py2exe_datafiles(),
'package_data': {'aston': \
['qtgui/i18n/*.qm', 'qtgui/icons/*.png']},
'include_package_data': True,
'install_requires': ['numpy', 'scipy', 'matplotlib', 'sqlalchemy'],
'test_suite': 'nose.collector',
}
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
#setup the distutils stuff
try:
import py2exe
except ImportError:
print('Could not import py2exe. Windows exe could not be built.')
sys.exit(0)
options['windows'] = ['astonx.py']
# scipy...._validation is only needed because of bug in scipy
#options['data_files'] += [('Microsoft.VC90.CRT', \
# glob(r'C:\Program Files\Microsoft Visual Studio 9.0' + \
# r'\VC\redist\x86\Microsoft.VC90.CRT\*.*'))]
options['data_files'] += [(r'aston\qtgui\i18n', \
glob(os.path.abspath(r'aston\qtgui\i18n\*.qm')))]
options['data_files'] += [(r'aston\qtgui\icons', \
glob(os.path.abspath(r'aston\qtgui\icons\*.png')))]
options['zipfile'] = None
options['options'] = {
'py2exe': {'skip_archive': False,
'bundle_files': 2,
'compressed': True,
'optimize': '2',
'dll_excludes': ['MSVCP90.dll', 'tcl85.dll', \
'tk85.dll', 'w9xpopen.exe'],
'includes': ['sip', 'scipy.sparse.csgraph._validation', \
'scipy.io.matlab.streams'],
'excludes': ['_gtkagg', '_tkagg', 'tcl', 'Tkconstants', 'Tkinter']}
}
#clean up stuff
os.system('rmdir dist /s /q')
os.system('rmdir build /s /q')
os.system('rmdir dist_win /s /q')
elif len(sys.argv) >= 2 and sys.argv[1] == 'py2app':
#setup the distutils stuff
try:
import py2app
except ImportError:
print('Could not import py2app. Mac bundle could not be built.')
sys.exit(0)
options['app'] = ['astonx.py']
options['setup_requires'] = ['py2app']
options['iconfile'] = 'aston/qtgui/icons/logo.icns'
options['data_files'] += [('aston/qtgui/i18n', \
glob(os.path.abspath('aston/qtgui/i18n/*.qm')))]
options['data_files'] += [('aston/qtgui/icons', \
glob(os.path.abspath('aston/qtgui/icons/*.png')))]
options['options'] = {'py2app': {
'argv_emulation': False,
'includes': ['sip', 'PyQt4', 'PyQt4.QtCore', \
'PyQt4.QtGui', 'matplotlib', 'numpy', 'scipy'],
'excludes': ['PyQt4.QtDesigner', 'PyQt4.QtNetwork', \
'PyQt4.QtOpenGL', 'PyQt4.QtScript', 'PyQt4.QtSql', \
'PyQt4.QtTest', 'PyQt4.QtWebKit', 'PyQt4.QtXml', \
'PyQt4.phonon', 'PyQt4.QtHelp', 'PyQt4.QtMultimedia', \
'PyQt4.QtXmlPatterns', 'matplotlib.tests', 'scipy.weave']
}}
#clean up stuff
os.system('rm -rf build')
#all the magic happens right here
setup(**options)
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
os.system('rmdir build /s /q')
os.system('rmdir dist\\mpl-data\\sample_data /s /q')
os.system('copy platform\\win\\*.ico dist\\aston\\qtgui\\icons\\')
#TODO: create the Microsoft.VC90.CRT folder and copy the DLLs
# and manifest into it
#TODO: run the aston.nsi
elif len(sys.argv) >= 2 and sys.argv[1] == 'py2app':
res_path = 'dist/Aston.app/Contents/Resources/'
os.system('rm -rf build')
os.system('cp -rf platform/mac/qt_menu.nib ' + res_path)
os.system('cp platform/mac/qt.conf ' + res_path)
os.system('cp platform/mac/logo.icns ' + res_path + 'PythonApplet.icns')
os.system('rm -rf ' + res_path + 'mpl-data/sample_data')
os.system('rm -rf ' + res_path + 'lib/python2.7/matplotlib/tests')
os.system('rm -rf ' + res_path + '/lib/python2.7/scipy/weave')
os.system('rm -rf ' + res_path + '/lib/python2.7/matplotlib/mpl-data')
# Delete the following directories
#/Content/Resources/lib/python2.7/matplotlib/testing
#/Content/Resources/lib/python2.7/scipy/spatial/tests
#/Content/Resources/lib/python2.7/email/test
#/Content/Frameworks/QtXmlPatterns.framework
#/Content/Frameworks/QtNetwork.framework
#/Content/Frameworks/QtScript.framework
#/Content/Frameworks/QtScriptTools.framework
##TODO: remove stuff from "dist/Aston.app/Contents/Resources/lib/python2.7"
##matplotlib.tests, scipy.weave, numpy.f2py
##libQtNetwork.4.dylib, libQtXmlPatterns.4.dylib, libtcl8.5.dylib
##libtk8.dylib, libQtDeclarative.dylib, libQtScript, libQtScriptTools
##libQtSql, libX11
#The following doesn't seem to work?
#os.system('rm -rf dist_mac')
#os.system('mkdir dist_mac')
#os.system('hdiutil create -fs HFS+ -volname "Aston" -srcfolder dist dist_mac/Aston.dmg')
|
gpl-3.0
|
lazywei/scikit-learn
|
benchmarks/bench_multilabel_metrics.py
|
86
|
7286
|
#!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
    density : array-like of floats (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
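# Hedged usage sketch (not part of the original benchmark script): timing two
# metrics over all formats for a single problem size; the argument values are
# illustrative.
#   res = benchmark(metrics=[METRICS['accuracy'], METRICS['hamming']],
#                   samples=1000, classes=4, density=.2, n_times=5)
#   # res.shape == (2, len(FORMATS), 1, 1, 1); entries are times in seconds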
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
|
bsd-3-clause
|
BigDataforYou/movie_recommendation_workshop_1
|
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tseries/tests/test_timedeltas.py
|
1
|
73366
|
# pylint: disable-msg=E1101,W0612
from __future__ import division
from datetime import timedelta, time
import nose
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Timestamp, Timedelta,
TimedeltaIndex, isnull, date_range,
timedelta_range, Int64Index)
from pandas.compat import range
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal, assert_frame_equal,
assert_almost_equal, assert_index_equal)
from numpy.testing import assert_allclose
from pandas.tseries.offsets import Day, Second
import pandas.util.testing as tm
from numpy.random import randn
from pandas import _np_version_under1p8
iNaT = tslib.iNaT
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hhmmdd part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a TimeDelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a TimeDelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertTrue(isnull(Timestamp('nat')))
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
self.assertEqual(r1, s1)
r2 = t2.round(freq)
self.assertEqual(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_identity(self):
td = Timedelta(10, unit='d')
self.assertTrue(isinstance(td, Timedelta))
self.assertTrue(isinstance(td, timedelta))
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other, np.array([1]))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td, np.array([1]))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_compare_timedelta_series(self):
# regresssion test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_compare_timedelta_ndarray(self):
# GH11835
periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
self.assert_numpy_array_equal(result, expected)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_fields(self):
def check(value):
# check that the value is int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days, -2)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
def test_timedelta_range(self):
expected = to_timedelta(np.arange(5), unit='D')
result = timedelta_range('0 days', periods=5, freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(11), unit='D')
result = timedelta_range('0 days', '10 days', freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(5), unit='D') + Second(2) + Day()
result = timedelta_range('1 days, 00:00:02', '5 days, 00:00:02',
freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta([1, 3, 5, 7, 9], unit='D') + Second(2)
result = timedelta_range('1 days, 00:00:02', periods=5, freq='2D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(50), unit='T') * 30
result = timedelta_range('0 days', freq='30T', periods=50)
tm.assert_index_equal(result, expected)
# GH 11776
arr = np.arange(10).reshape(2, 5)
df = pd.DataFrame(np.arange(10).reshape(2, 5))
for arg in (arr, df):
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_timedelta(arg)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_timedelta(arg, errors=errors)
# issue10583
df = pd.DataFrame(np.random.normal(size=(10, 4)))
df.index = pd.timedelta_range(start='0s', periods=10, freq='s')
expected = df.loc[pd.Timedelta('0s'):, :]
result = df.loc['0s':, :]
assert_frame_equal(expected, result)
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0, 'ns'))
self.assertEqual(ct(10), np.timedelta64(10, 'ns'))
self.assertEqual(ct(10, unit='ns'), np.timedelta64(
10, 'ns').astype('m8[ns]'))
self.assertEqual(ct(10, unit='us'), np.timedelta64(
10, 'us').astype('m8[ns]'))
self.assertEqual(ct(10, unit='ms'), np.timedelta64(
10, 'ms').astype('m8[ns]'))
self.assertEqual(ct(10, unit='s'), np.timedelta64(
10, 's').astype('m8[ns]'))
self.assertEqual(ct(10, unit='d'), np.timedelta64(
10, 'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)),
np.timedelta64(1, 's').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)),
np.timedelta64(1, 'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)),
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('100'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000, 'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000, 'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000, 'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000, 'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000, 'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000, 'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000, 'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10, 'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100, 'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000, 'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000, 'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000, 'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000, 'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1, 's')))
self.assertEqual(ct('06:00:01'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.0'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.01'), conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
self.assertEqual(ct('- 1days, 00:00:01'),
conv(-d1 + np.timedelta64(1, 's')))
self.assertEqual(ct('1days, 06:00:01'), conv(
d1 + np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_nat_converters(self):
self.assertEqual(to_timedelta(
'nat', box=False).astype('int64'), tslib.iNaT)
self.assertEqual(to_timedelta(
'nan', box=False).astype('int64'), tslib.iNaT)
def test_to_timedelta(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
self.assertEqual(to_timedelta('1 days 06:05:01.00003', box=False),
conv(d1 + np.timedelta64(6 * 3600 +
5 * 60 + 1, 's') +
np.timedelta64(30, 'us')))
self.assertEqual(to_timedelta('15.5us', box=False),
conv(np.timedelta64(15500, 'ns')))
# empty string
result = to_timedelta('', box=False)
self.assertEqual(result.astype('int64'), tslib.iNaT)
result = to_timedelta(['', ''])
self.assertTrue(isnull(result).all())
# pass thru
result = to_timedelta(np.array([np.timedelta64(1, 's')]))
expected = pd.Index(np.array([np.timedelta64(1, 's')]))
tm.assert_index_equal(result, expected)
# ints
result = np.timedelta64(0, 'ns')
expected = to_timedelta(0, box=False)
self.assertEqual(result, expected)
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d', '1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = TimedeltaIndex([np.timedelta64(0, 'ns'), np.timedelta64(
10, 's').astype('m8[ns]')])
expected = to_timedelta([0, 10], unit='s')
tm.assert_index_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
# arrays of various dtypes
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='s')
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='m')
expected = TimedeltaIndex([np.timedelta64(1, 'm')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='h')
expected = TimedeltaIndex([np.timedelta64(1, 'h')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[s]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[D]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 'D')] * 5)
tm.assert_index_equal(result, expected)
# Test with lists as input when box=False
expected = np.array(np.arange(3) * 1000000000, dtype='timedelta64[ns]')
result = to_timedelta(range(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta(np.arange(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta([0, 1, 2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
# Tests with fractional seconds as input:
expected = np.array(
[0, 500000000, 800000000, 1200000000], dtype='timedelta64[ns]')
result = to_timedelta([0., 0.5, 0.8, 1.2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_to_timedelta_invalid(self):
# these will error
self.assertRaises(ValueError, lambda: to_timedelta([1, 2], unit='foo'))
self.assertRaises(ValueError, lambda: to_timedelta(1, unit='foo'))
# time not supported ATM
self.assertRaises(ValueError, lambda: to_timedelta(time(second=1)))
self.assertTrue(to_timedelta(
time(second=1), errors='coerce') is pd.NaT)
self.assertRaises(ValueError, lambda: to_timedelta(['foo', 'bar']))
tm.assert_index_equal(TimedeltaIndex([pd.NaT, pd.NaT]),
to_timedelta(['foo', 'bar'], errors='coerce'))
tm.assert_index_equal(TimedeltaIndex(['1 day', pd.NaT, '1 min']),
to_timedelta(['1 day', 'bar', '1 min'],
errors='coerce'))
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1, 's')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_overflow(self):
# GH 9442
s = Series(pd.date_range('20130101', periods=100000, freq='H'))
s[0] += pd.Timedelta('1s 1ms')
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)
).sum())
# the computation is converted to float, so there might be some loss
# of precision
self.assertTrue(np.allclose(result.value / 1000, expected.value /
1000))
# sum
self.assertRaises(ValueError, lambda: (s - s.min()).sum())
s1 = s[0:10000]
self.assertRaises(ValueError, lambda: (s1 - s1.min()).sum())
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_to_timedelta_on_missing_values(self):
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'),
timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
self.assertEqual(actual.value, timedelta_NaT.astype('int64'))
actual = pd.to_timedelta(pd.NaT)
self.assertEqual(actual.value, timedelta_NaT.astype('int64'))
def test_to_timedelta_on_nanoseconds(self):
# GH 9273
result = Timedelta(nanoseconds=100)
expected = Timedelta('100ns')
self.assertEqual(result, expected)
result = Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1,
milliseconds=1, microseconds=1, nanoseconds=1)
expected = Timedelta(694861001001001)
self.assertEqual(result, expected)
result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1)
expected = Timedelta('1us1ns')
self.assertEqual(result, expected)
result = Timedelta(microseconds=1) - Timedelta(nanoseconds=1)
expected = Timedelta('999ns')
self.assertEqual(result, expected)
result = Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2)
expected = Timedelta('990ns')
self.assertEqual(result, expected)
self.assertRaises(TypeError, lambda: Timedelta(nanoseconds='abc'))
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = scalar1 + s1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = -scalar1 + s2
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = NA + s1
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = -NA + s1
assert_series_equal(actual, sn)
actual = s1 + pd.NaT
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]
# TODO: unused?
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = self.round_trip_pickle(v)
self.assertEqual(v, v_p)
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, 'D')
td = timedelta(days=1)
self.assertEqual(hash(v), hash(td))
d = {td: 2}
self.assertEqual(d[v], 2)
tds = timedelta_range('1 second', periods=20)
self.assertTrue(all(hash(td) == hash(td.to_pytimedelta()) for td in
tds))
# python timedeltas drop ns resolution
ns_td = Timedelta(1, 'ns')
self.assertNotEqual(hash(ns_td), hash(ns_td.to_pytimedelta()))
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
self.assertTrue(min_td.value == np.iinfo(np.int64).min + 1)
self.assertTrue(max_td.value == np.iinfo(np.int64).max)
# Just beyond the lower limit we get NaT, before the OverflowError
self.assertIsInstance(min_td - Timedelta(1, 'ns'),
pd.tslib.NaTType)
with tm.assertRaises(OverflowError):
min_td - Timedelta(2, 'ns')
with tm.assertRaises(OverflowError):
max_td + Timedelta(1, 'ns')
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, 'ns')
self.assertIsInstance(td, pd.tslib.NaTType)
with tm.assertRaises(OverflowError):
Timedelta(min_td.value - 2, 'ns')
with tm.assertRaises(OverflowError):
Timedelta(max_td.value + 1, 'ns')
class TestTimedeltaIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_pass_TimedeltaIndex_to_index(self):
rng = timedelta_range('1 days', '10 days')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pytimedelta(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_pickle(self):
rng = timedelta_range('1 days', periods=10)
rng_p = self.round_trip_pickle(rng)
tm.assert_index_equal(rng, rng_p)
def test_hash_error(self):
index = timedelta_range('1 days', periods=10)
with tm.assertRaisesRegexp(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_append_join_nondatetimeindex(self):
rng = timedelta_range('1 days', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assertIsInstance(result[0], Timedelta)
# it works
rng.join(idx, how='outer')
def test_append_numpy_bug_1681(self):
td = timedelta_range('1 days', '10 days', freq='2D')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': td}, index=td)
str(c)
result = a.append(c)
self.assertTrue((result['B'] == td).all())
def test_astype(self):
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_fields(self):
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
self.assert_numpy_array_equal(rng.days, np.array(
[1, 1], dtype='int64'))
self.assert_numpy_array_equal(
rng.seconds,
np.array([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13],
dtype='int64'))
self.assert_numpy_array_equal(rng.microseconds, np.array(
[100 * 1000 + 123, 100 * 1000 + 123], dtype='int64'))
self.assert_numpy_array_equal(rng.nanoseconds, np.array(
[456, 456], dtype='int64'))
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# with nat
s = Series(rng)
s[1] = np.nan
tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))
tm.assert_series_equal(s.dt.seconds, Series(
[10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1]))
def test_total_seconds(self):
# GH 10939
# test index
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
assert_allclose(rng.total_seconds(), expt, atol=1e-10, rtol=0)
# test Series
s = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(s.dt.total_seconds(), s_expt)
# with nat
s[1] = np.nan
s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +
12 + 100123456. / 1e9, np.nan], index=[0, 1])
tm.assert_series_equal(s.dt.total_seconds(), s_expt)
# with both nat
s = Series([np.nan, np.nan], dtype='timedelta64[ns]')
tm.assert_series_equal(s.dt.total_seconds(), Series(
[np.nan, np.nan], index=[0, 1]))
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
assert_allclose(rng.total_seconds(), expt, atol=1e-10, rtol=0)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_components(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
self.assertFalse(result.iloc[0].isnull().all())
self.assertTrue(result.iloc[1].isnull().all())
def test_constructor(self):
expected = TimedeltaIndex(['1 days', '1 days 00:00:05', '2 days',
'2 days 00:00:02', '0 days 00:00:03'])
result = TimedeltaIndex(['1 days', '1 days, 00:00:05', np.timedelta64(
2, 'D'), timedelta(days=2, seconds=2), pd.offsets.Second(3)])
tm.assert_index_equal(result, expected)
# unicode
result = TimedeltaIndex([u'1 days', '1 days, 00:00:05', np.timedelta64(
2, 'D'), timedelta(days=2, seconds=2), pd.offsets.Second(3)])
expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01',
'0 days 00:00:02'])
tm.assert_index_equal(TimedeltaIndex(range(3), unit='s'), expected)
expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:05',
'0 days 00:00:09'])
tm.assert_index_equal(TimedeltaIndex([0, 5, 9], unit='s'), expected)
expected = TimedeltaIndex(
['0 days 00:00:00.400', '0 days 00:00:00.450',
'0 days 00:00:01.200'])
tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit='ms'),
expected)
def test_constructor_coverage(self):
rng = timedelta_range('1 days', periods=10.5)
exp = timedelta_range('1 days', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, TimedeltaIndex, start='1 days',
periods='foo', freq='D')
self.assertRaises(ValueError, TimedeltaIndex, start='1 days',
end='10 days')
self.assertRaises(ValueError, TimedeltaIndex, '1 days')
# generator expression
gen = (timedelta(i) for i in range(10))
result = TimedeltaIndex(gen)
expected = TimedeltaIndex([timedelta(i) for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['1 days', '2 days', '3 days'])
result = TimedeltaIndex(strings)
expected = to_timedelta([1, 2, 3], unit='d')
self.assertTrue(result.equals(expected))
from_ints = TimedeltaIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming freq
self.assertRaises(ValueError, TimedeltaIndex,
['1 days', '2 days', '4 days'], freq='D')
self.assertRaises(ValueError, TimedeltaIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = TimedeltaIndex(start='1 days', periods=1, freq='D', name='TEST')
self.assertEqual(idx.name, 'TEST')
# GH10025
idx2 = TimedeltaIndex(idx, name='something else')
self.assertEqual(idx2.name, 'something else')
def test_freq_conversion(self):
# doc example
# series
td = Series(date_range('20130101', periods=4)) - \
Series(date_range('20121201', periods=4))
td[2] += timedelta(minutes=5, seconds=3)
td[3] = np.nan
result = td / np.timedelta64(1, 'D')
expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan
])
assert_series_equal(result, expected)
result = td.astype('timedelta64[D]')
expected = Series([31, 31, 31, np.nan])
assert_series_equal(result, expected)
result = td / np.timedelta64(1, 's')
expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
np.nan])
assert_series_equal(result, expected)
result = td.astype('timedelta64[s]')
assert_series_equal(result, expected)
# tdi
td = TimedeltaIndex(td)
result = td / np.timedelta64(1, 'D')
expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])
assert_index_equal(result, expected)
result = td.astype('timedelta64[D]')
expected = Index([31, 31, 31, np.nan])
assert_index_equal(result, expected)
result = td / np.timedelta64(1, 's')
expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
np.nan])
assert_index_equal(result, expected)
result = td.astype('timedelta64[s]')
assert_index_equal(result, expected)
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
exp = np.array([True, True, True] + [False] * 7)
self.assert_numpy_array_equal(result, exp)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
if _np_version_under1p8:
# cannot test the array case because np.datetime64('nat') returns today's date
cases = [(tdidx1, tdidx2)]
else:
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check that pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
self.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
self.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
self.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
self.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
self.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
self.assert_numpy_array_equal(result, expected)
def test_map(self):
rng = timedelta_range('1 day', periods=10)
f = lambda x: x.days
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_misc_coverage(self):
rng = timedelta_range('1 day', periods=5)
result = rng.groupby(rng.days)
tm.assertIsInstance(list(result.values())[0][0], Timedelta)
idx = TimedeltaIndex(['3d', '1d', '2d'])
self.assertTrue(idx.equals(list(idx)))
non_td = Index(list('abc'))
self.assertFalse(idx.equals(list(non_td)))
def test_union(self):
i1 = timedelta_range('1day', periods=5)
i2 = timedelta_range('3day', periods=5)
result = i1.union(i2)
expected = timedelta_range('1day', periods=7)
self.assert_numpy_array_equal(result, expected)
i1 = Int64Index(np.arange(0, 20, 2))
i2 = TimedeltaIndex(start='1 day', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_union_coverage(self):
idx = TimedeltaIndex(['3d', '1d', '2d'])
ordered = TimedeltaIndex(idx.sort_values(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = timedelta_range('1 day', periods=4, freq='3H')
rng_b = timedelta_range('1 day', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = TimedeltaIndex(['1 day 15:19:49.695000'])
right = TimedeltaIndex(
['2 day 13:04:21.322000', '1 day 15:27:24.873000',
'1 day 15:31:05.350000'])
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
left = timedelta_range("1 day", "30d")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(5)
result = index_1 & index_2
self.assertEqual(len(result), 0)
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(1)
result = index_1 & index_2
expected = timedelta_range('1 day 01:00:00', periods=3, freq='h')
tm.assert_index_equal(result, expected)
def test_get_duplicates(self):
idx = TimedeltaIndex(['1 day', '2 day', '2 day', '3 day', '3day',
'4day'])
result = idx.get_duplicates()
ex = TimedeltaIndex(['2 day', '3day'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = TimedeltaIndex(['1 day 00:00:05', '1 day 00:00:01',
'1 day 00:00:02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_sort_values(self):
idx = TimedeltaIndex(['4d', '1d', '2d'])
ordered = idx.sort_values()
self.assertTrue(ordered.is_monotonic)
ordered = idx.sort_values(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.sort_values(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx')
result = idx.insert(2, timedelta(days=5))
exp = TimedeltaIndex(['4day', '1day', '5day', '2day'], name='idx')
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'),
Timedelta('2day')], name='idx')
self.assertNotIsInstance(result, TimedeltaIndex)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx')
# preserve freq
expected_0 = TimedeltaIndex(['1day', '1day 00:00:01', '1day 00:00:02',
'1day 00:00:03'],
name='idx', freq='s')
expected_3 = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:04'],
name='idx', freq='s')
# reset freq to None
expected_1_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:01',
'1day 00:00:02', '1day 00:00:03'],
name='idx', freq=None)
expected_3_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:05'],
name='idx', freq=None)
cases = [(0, Timedelta('1day'), expected_0),
(-3, Timedelta('1day'), expected_0),
(3, Timedelta('1day 00:00:04'), expected_3),
(1, Timedelta('1day 00:00:01'), expected_1_nofreq),
(3, Timedelta('1day 00:00:05'), expected_3_nofreq)]
for n, d, expected in cases:
result = idx.insert(n, d)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
def test_delete(self):
idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx')
# preserve freq
expected_0 = timedelta_range(start='2 Days', periods=4, freq='D',
name='idx')
expected_4 = timedelta_range(start='1 Days', periods=4, freq='D',
name='idx')
# reset freq to None
expected_1 = TimedeltaIndex(
['1 day', '3 day', '4 day', '5 day'], freq=None, name='idx')
cases = {0: expected_0,
-5: expected_0,
-1: expected_4,
4: expected_4,
1: expected_1}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on the numpy version
result = idx.delete(5)
def test_delete_slice(self):
idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx')
# preserve freq
expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D',
name='idx')
expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D',
name='idx')
# reset freq to None
expected_3_5 = TimedeltaIndex(['1 d', '2 d', '3 d',
'7 d', '8 d', '9 d', '10d'],
freq=None, name='idx')
cases = {(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
result = idx.delete(slice(n[0], n[-1] + 1))
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
def test_take(self):
tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00']
idx = TimedeltaIndex(start='1d', end='2d', freq='H', name='idx')
expected = TimedeltaIndex(tds, freq=None, name='idx')
taken1 = idx.take([2, 4, 10])
taken2 = idx[[2, 4, 10]]
for taken in [taken1, taken2]:
self.assertTrue(taken.equals(expected))
tm.assertIsInstance(taken, TimedeltaIndex)
self.assertIsNone(taken.freq)
self.assertEqual(taken.name, expected.name)
def test_take_fill_value(self):
# GH 12631
idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'],
name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'],
name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with tm.assertRaises(IndexError):
idx.take(np.array([1, -5]))
def test_isin(self):
index = tm.makeTimedeltaIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='td')
str(df)
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
tm.assert_index_equal(cols, joined)
def test_slice_keeps_name(self):
# GH4226
dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = timedelta_range('1 day', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def test_factorize(self):
idx1 = TimedeltaIndex(['1 day', '1 day', '2 day', '2 day', '3 day',
'3 day'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = TimedeltaIndex(['1 day', '2 day', '3 day'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = timedelta_range('1 day', periods=4, freq='s')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestSlicing(tm.TestCase):
def test_partial_slice(self):
rng = timedelta_range('1 day 10:11:12', freq='h', periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['5 day':'6 day']
expected = s.iloc[86:134]
assert_series_equal(result, expected)
result = s['5 day':]
expected = s.iloc[86:]
assert_series_equal(result, expected)
result = s[:'6 day']
expected = s.iloc[:134]
assert_series_equal(result, expected)
result = s['6 days, 23:11:12']
self.assertEqual(result, s.iloc[133])
self.assertRaises(KeyError, s.__getitem__, '50 days')
def test_partial_slice_high_reso(self):
# higher reso
rng = timedelta_range('1 day 10:11:12', freq='us', periods=2000)
s = Series(np.arange(len(rng)), index=rng)
result = s['1 day 10:11:12':]
expected = s.iloc[0:]
assert_series_equal(result, expected)
result = s['1 day 10:11:12.001':]
expected = s.iloc[1000:]
assert_series_equal(result, expected)
result = s['1 days, 10:11:12.001001']
self.assertEqual(result, s.iloc[1001])
def test_slice_with_negative_step(self):
ts = Series(np.arange(20), timedelta_range('0', periods=20, freq='H'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(ts[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Timedelta(hours=7)::-1], SLC[7::-1])
assert_slices_equivalent(SLC['7 hours'::-1], SLC[7::-1])
assert_slices_equivalent(SLC[:Timedelta(hours=7):-1], SLC[:6:-1])
assert_slices_equivalent(SLC[:'7 hours':-1], SLC[:6:-1])
assert_slices_equivalent(SLC['15 hours':'7 hours':-1], SLC[15:6:-1])
assert_slices_equivalent(SLC[Timedelta(hours=15):Timedelta(hours=7):-
1], SLC[15:6:-1])
assert_slices_equivalent(SLC['15 hours':Timedelta(hours=7):-1],
SLC[15:6:-1])
assert_slices_equivalent(SLC[Timedelta(hours=15):'7 hours':-1],
SLC[15:6:-1])
assert_slices_equivalent(SLC['7 hours':'15 hours':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20), timedelta_range('0', periods=20, freq='H'))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng - 2
exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng * 2
exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '4D')
result = rng / 2
exp = timedelta_range('1 days', periods=5, freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
result = -rng
exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '-2D')
rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
result = abs(rng)
exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
'2 days'], name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
mit
|
phdowling/scikit-learn
|
sklearn/cross_decomposition/pls_.py
|
187
|
28507
|
"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
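# Illustrative sketch, not part of the original module: _demo_nipals_matches_svd
# is a hypothetical helper (name and data are mine) showing that, in mode A, the
# NIPALS inner loop above recovers the dominant singular vectors of X'Y up to
# sign, i.e. the same directions returned by _svd_cross_product. Both cosines
# returned below are expected to be close to 1.0 for typical data.
def _demo_nipals_matches_svd(random_state=0):
    rng = np.random.RandomState(random_state)
    X = rng.randn(50, 4)
    Y = rng.randn(50, 3)
    Xc = X - X.mean(axis=0)
    Yc = Y - Y.mean(axis=0)
    x_w, y_w, n_iter = _nipals_twoblocks_inner_loop(Xc, Yc, mode="A")
    u, v = _svd_cross_product(Xc, Yc)
    # x_w is (approximately) unit-norm; y_w is not when norm_y_weights=False,
    # so normalize it before comparing directions
    cos_x = abs(float(np.dot(x_w.ravel(), u.ravel())))
    cos_y = abs(float(np.dot(y_w.ravel(), v.ravel()))) / linalg.norm(y_w)
    return cos_x, cos_y, n_iter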
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
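# Illustrative sketch, not part of the original module: a hypothetical check
# (helper name and toy data are mine) that _center_scale_xy standardizes each
# column to zero mean and unit ddof=1 standard deviation, and that the returned
# statistics reconstruct the original data.
def _demo_center_scale(random_state=0):
    rng = np.random.RandomState(random_state)
    X = 5.0 * rng.randn(30, 3) + 2.0
    Y = 0.1 * rng.randn(30, 2) - 1.0
    # pass copies: the helper centers and scales its inputs in place
    Xs, Ys, x_mean, y_mean, x_std, y_std = _center_scale_xy(X.copy(), Y.copy())
    assert np.allclose(Xs.mean(axis=0), 0.0)
    assert np.allclose(Xs.std(axis=0, ddof=1), 1.0)
    assert np.allclose(Xs * x_std + x_mean, X)
    assert np.allclose(Ys * y_std + y_mean, Y)
    return x_mean, x_std, y_mean, y_std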
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm; the constructor's
parameters allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterates over components.
(ii) The inner loop estimates the weight vectors. This can be done with
two algorithms: (a) the inner loop of the original NIPALS algorithm, or
(b) an SVD of the residual cross-covariance matrices.
Parameters
----------
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contain the residual (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # A possible memory footprint reduction could be achieved here: to
            # avoid allocating a data chunk for the rank-one approximation
            # matrix that is then subtracted from Xk, one could perform a
            # column-wise deflation instead.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
        # Estimate the regression coefficients, whatever the deflation mode:
        # regress Y on T,
        # Y = TQ' + Err,
        # then express Y as a function of X:
        # Y = X W(P'W)^-1 Q' + Err = XB + Err
        # => B = W*Q' (p x q)
        self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
        self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
                      self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Target vectors, where n_samples is the number of samples and
            q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
            p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Target vectors, where n_samples is the number of samples and
            q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
        return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
    PLSRegression implements the 2-block PLS regression known as PLS2, or PLS1
    in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
    tol : non-negative real, (default 1e-06)
        Tolerance used in the iterative algorithm.
    copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you do not care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
    coef_ : array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as three PLS packages
    available in the R language (R-project):
    - "mixOmics" with function pls(X, Y, mode = "regression")
    - "plspm" with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
    copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you do not care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
    The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical symmetric version of the PLS
    regression, which is slightly different from CCA. It is mostly used
    for modeling.
    This implementation provides the same results as the "plspm" package
    available in the R language (R-project), using the function plsca(X, Y).
    Results are equal or collinear with the function
    ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly follow
    the Wold algorithm, since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform a svd on the crosscovariance matrix: X'Y
There are no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
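    Examples
    --------
    A minimal usage sketch (the data are borrowed from the PLSRegression
    example above; only the score shapes are shown):

    >>> import numpy as np
    >>> from sklearn.cross_decomposition import PLSSVD
    >>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    >>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    >>> plssvd = PLSSVD(n_components=2).fit(X, Y)
    >>> X_scores, Y_scores = plssvd.transform(X, Y)
    >>> X_scores.shape, Y_scores.shape
    ((4, 2), (4, 2))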
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
        # let's use arpack to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
|
bsd-3-clause
|
HolgerPeters/scikit-learn
|
benchmarks/bench_plot_nmf.py
|
28
|
15630
|
"""
Benchmarks of Non-Negative Matrix Factorization
"""
# Authors: Tom Dupre la Tour (benchmark)
# Chih-Jen Linn (original projected gradient NMF implementation)
# Anthony Di Franco (projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import warnings
import numbers
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sklearn.utils.testing import ignore_warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition.nmf import NMF
from sklearn.decomposition.nmf import _initialize_nmf
from sklearn.decomposition.nmf import _beta_divergence
from sklearn.decomposition.nmf import INTEGER_TYPES, _check_init
from sklearn.externals.joblib import Memory
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import fast_dot, safe_sparse_dot, squared_norm
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted, check_non_negative
mem = Memory(cachedir='.', verbose=0)
###################
# Start of _PGNMF #
###################
# This class implements a projected gradient solver for the NMF.
# The projected gradient solver was removed from scikit-learn in version 0.19,
# and a simplified copy is used here for comparison purposes only.
# It is not tested, and it may change or disappear without notice.
def _norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return np.sqrt(squared_norm(x))
def _nls_subproblem(X, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
        Larger values allow finding a better step size but lead to a longer
        line search. 0.1 is a commonly used value in the optimization
        literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtX = safe_sparse_dot(W.T, X)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtX
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if _norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.",
ConvergenceWarning)
return H, grad, n_iter
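# A minimal usage sketch of _nls_subproblem (hypothetical shapes and random
# non-negative data, kept as a comment so the benchmark module is unchanged):
#
#   rng = np.random.RandomState(0)
#   X = np.abs(rng.randn(6, 5))
#   W = np.abs(rng.randn(6, 3))
#   H0 = np.abs(rng.randn(3, 5))
#   H, grad, n_iter = _nls_subproblem(X, W, H0, tol=1e-4, max_iter=200)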
def _fit_projected_gradient(X, W, H, tol, max_iter, nls_max_iter, alpha,
l1_ratio):
gradW = (np.dot(W, np.dot(H, H.T)) -
safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H) -
safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
Wt, gradWt, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W, gradW = Wt.T, gradWt.T
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
Wt, _, _ = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W = Wt.T
return W, H, n_iter
class _PGNMF(NMF):
"""Non-Negative Matrix Factorization (NMF) with projected gradient solver.
    This class is private and for comparison purposes only.
It may change or disappear without notice.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., nls_max_iter=10):
self.nls_max_iter = nls_max_iter
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
def fit(self, X, y=None, **params):
self.fit_transform(X, **params)
return self
def transform(self, X):
check_is_fitted(self, 'components_')
H = self.components_
W, _, self.n_iter_ = self._fit_transform(X, H=H, update_H=False)
return W
def inverse_transform(self, W):
check_is_fitted(self, 'components_')
return np.dot(W, self.components_)
def fit_transform(self, X, y=None, W=None, H=None):
W, H, self.n_iter = self._fit_transform(X, W=W, H=H, update_H=True)
self.components_ = H
return W
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
if (not isinstance(n_components, INTEGER_TYPES) or
n_components <= 0):
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(self.max_iter, INTEGER_TYPES) or self.max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
# check W and H, or initialize them
if self.init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=self.init,
random_state=self.random_state)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(
X, W, H, self.tol, self.max_iter, self.nls_max_iter,
self.alpha, self.l1_ratio)
else: # transform
Wt, _, n_iter = _nls_subproblem(X.T, H.T, W.T, self.tol,
self.nls_max_iter,
alpha=self.alpha,
l1_ratio=self.l1_ratio)
W = Wt.T
if n_iter == self.max_iter and self.tol > 0:
warnings.warn("Maximum number of iteration %d reached. Increase it"
" to improve convergence." % self.max_iter,
ConvergenceWarning)
return W, H, n_iter
#################
# End of _PGNMF #
#################
def plot_results(results_df, plot_name):
if results_df is None:
return None
plt.figure(figsize=(16, 6))
colors = 'bgr'
markers = 'ovs'
ax = plt.subplot(1, 3, 1)
for i, init in enumerate(np.unique(results_df['init'])):
plt.subplot(1, 3, i + 1, sharex=ax, sharey=ax)
for j, method in enumerate(np.unique(results_df['method'])):
mask = np.logical_and(results_df['init'] == init,
results_df['method'] == method)
selected_items = results_df[mask]
plt.plot(selected_items['time'], selected_items['loss'],
color=colors[j % len(colors)], ls='-',
marker=markers[j % len(markers)],
label=method)
plt.legend(loc=0, fontsize='x-small')
plt.xlabel("Time (s)")
plt.ylabel("loss")
plt.title("%s" % init)
plt.suptitle(plot_name, fontsize=16)
@ignore_warnings(category=ConvergenceWarning)
# use joblib to cache the results.
# X_shape is specified in the arguments to avoid hashing X
@mem.cache(ignore=['X', 'W0', 'H0'])
def bench_one(name, X, W0, H0, X_shape, clf_type, clf_params, init,
n_components, random_state):
W = W0.copy()
H = H0.copy()
clf = clf_type(**clf_params)
st = time()
W = clf.fit_transform(X, W=W, H=H)
end = time()
H = clf.components_
this_loss = _beta_divergence(X, W, H, 2.0, True)
duration = end - st
return this_loss, duration
def run_bench(X, clfs, plot_name, n_components, tol, alpha, l1_ratio):
start = time()
results = []
for name, clf_type, iter_range, clf_params in clfs:
print("Training %s:" % name)
for rs, init in enumerate(('nndsvd', 'nndsvdar', 'random')):
print(" %s %s: " % (init, " " * (8 - len(init))), end="")
W, H = _initialize_nmf(X, n_components, init, 1e-6, rs)
for max_iter in iter_range:
clf_params['alpha'] = alpha
clf_params['l1_ratio'] = l1_ratio
clf_params['max_iter'] = max_iter
clf_params['tol'] = tol
clf_params['random_state'] = rs
clf_params['init'] = 'custom'
clf_params['n_components'] = n_components
this_loss, duration = bench_one(name, X, W, H, X.shape,
clf_type, clf_params,
init, n_components, rs)
init_name = "init='%s'" % init
results.append((name, this_loss, duration, init_name))
# print("loss: %.6f, time: %.3f sec" % (this_loss, duration))
print(".", end="")
sys.stdout.flush()
print(" ")
    # Use a pandas dataframe to organize the results
results_df = pandas.DataFrame(results,
columns="method loss time init".split())
print("Total time = %0.3f sec\n" % (time() - start))
# plot the results
plot_results(results_df, plot_name)
return results_df
def load_20news():
print("Loading 20 newsgroups dataset")
print("-----------------------------")
from sklearn.datasets import fetch_20newsgroups
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data)
return tfidf
def load_faces():
print("Loading Olivetti face dataset")
print("-----------------------------")
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces(shuffle=True)
return faces.data
def build_clfs(cd_iters, pg_iters, mu_iters):
clfs = [("Coordinate Descent", NMF, cd_iters, {'solver': 'cd'}),
("Projected Gradient", _PGNMF, pg_iters, {'solver': 'pg'}),
("Multiplicative Update", NMF, mu_iters, {'solver': 'mu'}),
]
return clfs
if __name__ == '__main__':
alpha = 0.
l1_ratio = 0.5
n_components = 10
tol = 1e-15
# first benchmark on 20 newsgroup dataset: sparse, shape(11314, 39116)
plot_name = "20 Newsgroups sparse dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 6)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_20news = load_20news()
run_bench(X_20news, clfs, plot_name, n_components, tol, alpha, l1_ratio)
# second benchmark on Olivetti faces dataset: dense, shape(400, 4096)
plot_name = "Olivetti Faces dense dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 12)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_faces = load_faces()
run_bench(X_faces, clfs, plot_name, n_components, tol, alpha, l1_ratio,)
plt.show()
|
bsd-3-clause
|
cbuntain/redditResponseExtractor
|
tester.py
|
1
|
1422
|
#!/usr/bin/python
import sys
import csv
import pprint
import cPickle
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
classifierPath = sys.argv[1]
csvFilePath = sys.argv[2]
print "Using classifer: ", classifierPath
print "Will classify data in: ", csvFilePath
dataList = []
header = []
userIndex = []
with open(csvFilePath, 'r') as csvFile:
csvObj = csv.reader(csvFile, delimiter=',')
for headerStr in csvObj.next():
header.append(headerStr.strip())
for row in csvObj:
# strippedRow = [x.strip() for x in row]
strippedRow = []
for i in range(len(row)):
if ( i < 2 ):
strippedRow.append(row[i].strip())
else:
strippedRow.append(float(row[i].strip()))
dataList.append(strippedRow)
userIndex.append("%s-%s"%(strippedRow[0], strippedRow[1]))
df = pd.DataFrame(dataList, columns=header, index=userIndex)
classifier = None
with open(classifierPath, 'rb') as fid:
classifier = cPickle.load(fid)
featureList = [
# 'neighbors',
'density',
'degreedist',
'neighborprop',
'tieprop',
'cluster',
'triangle'
]
testData = df[featureList]
predictedLabels = classifier.predict(testData)
print testData
print predictedLabels
print "------"
testData['isanswer'] = predictedLabels
print testData
|
mit
|
cwoodall/doppler-gestures-py
|
pydoppler/ambiguity.py
|
2
|
3333
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# ryanvolz's Ambiguity Function: https://gist.github.com/ryanvolz/8b0d9f3e48ec8ddcef4d
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def ambiguity(code, nfreq=1):
"""Calculate the ambiguity function of code for nfreq frequencies.
The ambiguity function is the square of the autocorrelation,
normalized so the peak value is 1.
For correct results, we require that nfreq >= len(code).
The result is a 2-D array with the first index corresponding
to frequency shift. The code is frequency shifted by
normalized frequencies of range(nfreq)/nfreq and correlated
with the baseband code. The result amb[0] gives the
ambiguity with 0 frequency shift, amb[1] with 1/nfreq
frequency shift, etc. These frequencies are the same as (and
are in the same order as) the FFT frequencies for an nfreq-
length FFT.
****Thus, the peak value is at amb[0, len(code) - 1]****
To relocate the peak to the middle of the result, use
np.fft.fftshift(amb, axes=0)
To relocate the peak to the [0, 0] entry, use
np.fft.ifftshift(amb, axes=1)
"""
inlen = len(code)
outlen = 2*inlen - 1
#if nfreq < inlen:
# nfreq = inlen
# Doppler shift the code to form a correlation bank in the form of a matrix
doppleridx = np.arange(nfreq)[:, np.newaxis]*np.arange(inlen)
dopplermat = np.exp(2*np.pi*1j*doppleridx/nfreq)
# code is conjugated to form matched correlation
codebank = code.conj()*dopplermat
# initialize the output autocorrelation array
acorr = np.zeros((nfreq, outlen), np.complex_)
# correlate the Doppler-shifted codes with the original code
# to get autocorrelation
for k, shifted_code in enumerate(codebank):
acorr[k] = np.correlate(code, shifted_code, mode='full')
# calculate ambiguity function as normalized square magnitude of autocorrelation
# (index of peak value is [0, inlen - 1])
amb = np.abs(acorr / acorr[0, inlen - 1])**2
return amb
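# A minimal usage sketch (hypothetical 13-chip Barker code, kept as a comment
# so that importing this module stays side-effect free):
#
#   barker13 = np.array([1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1], np.complex_)
#   amb = ambiguity(barker13, nfreq=len(barker13))
#   # the normalized peak sits at amb[0, len(barker13) - 1] and equals 1.0
#   centered = np.fft.fftshift(amb, axes=0)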
def plotamb(code, channels, tone, window, rate):
def update(frame_number):
barker13 = np.asarray(code[0], np.complex)*mixer_sin
b13amb = ambiguity(barker13, window)
im.set_data(np.fft.fftshift(b13amb, axes=0).T)
return im
def init():
barker13 = np.ones(L, np.complex)
b13amb = ambiguity(barker13, window)
        im.set_data(np.fft.fftshift(b13amb, axes=0).T)
return im
fig = plt.figure()
plt.xlabel('Frequency Index')
plt.ylabel('Delay Index')
barker13 = np.asarray(code[0], np.complex)
L = len(barker13)
b13amb = np.empty((window, 2*L-1), np.float)
b13amb = ambiguity(barker13, window)
if channels == 2:
mixer_sin = np.array([(np.exp(2*np.pi*1j*tone*i/rate)) for i in range(L)])
else:
mixer_sin = np.array([(np.sin(2*np.pi*1 *tone*i/rate)) for i in range(L)])
im = plt.imshow(
np.fft.fftshift(b13amb, axes=0).T,
extent=(0-window/2, 0+window/2, -L, L),
aspect='auto', interpolation='none', origin='lower')
anim = animation.FuncAnimation(fig, update, interval=50,)
#anim = animation.FuncAnimation(fig, update, init_func=init, interval=50, blit=True,)
plt.show()
return 0
|
mit
|
r03ert0/ldsc
|
ldscore/parse.py
|
1
|
10016
|
'''
(c) 2014 Brendan Bulik-Sullivan and Hilary Finucane
This module contains functions for parsing various ldsc-defined file formats.
'''
from __future__ import division
import numpy as np
import pandas as pd
import os
def series_eq(x, y):
'''Compare series, return False if lengths not equal.'''
return len(x) == len(y) and (x == y).all()
def read_csv(fh, **kwargs):
return pd.read_csv(fh, delim_whitespace=True, na_values='.', **kwargs)
def sub_chr(s, chr):
'''Substitute chr for @, else append chr to the end of str.'''
if '@' not in s:
s += '@'
return s.replace('@', str(chr))
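# For example (hypothetical prefix): sub_chr('ld/chr@.l2', 3) returns
# 'ld/chr3.l2'; if the prefix contains no '@', the chromosome number is
# simply appended.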
def which_compression(fh):
'''Given a file prefix, figure out what sort of compression to use.'''
if os.access(fh + '.bz2', 4):
suffix = '.bz2'
compression = 'bz2'
elif os.access(fh + '.gz', 4):
suffix = '.gz'
compression = 'gzip'
elif os.access(fh, 4):
suffix = ''
compression = None
else:
raise IOError('Could not open {F}[./gz/bz2]'.format(F=fh))
return suffix, compression
def get_compression(fh):
'''Which sort of compression should we use with read_csv?'''
if fh.endswith('gz'):
compression = 'gzip'
elif fh.endswith('bz2'):
compression = 'bz2'
else:
compression = None
return compression
def read_cts(fh, match_snps):
'''Reads files for --cts-bin.'''
compression = get_compression(fh)
cts = read_csv(fh, compression=compression, header=None, names=['SNP', 'ANNOT'])
if not series_eq(cts.SNP, match_snps):
raise ValueError('--cts-bin and the .bim file must have identical SNP columns.')
return cts.ANNOT.values
def sumstats(fh, alleles=False, dropna=True):
'''Parses .sumstats files. See docs/file_formats_sumstats.txt.'''
dtype_dict = {'SNP': str, 'Z': float, 'N': float, 'A1': str, 'A2': str}
compression = get_compression(fh)
usecols = ['SNP', 'Z', 'N']
if alleles:
usecols += ['A1', 'A2']
try:
x = read_csv(fh, usecols=usecols, dtype=dtype_dict, compression=compression)
except (AttributeError, ValueError) as e:
raise ValueError('Improperly formatted sumstats file: ' + str(e.args))
if dropna:
x = x.dropna(how='any')
return x
def ldscore_fromlist(flist, num=None):
'''Sideways concatenation of a list of LD Score files.'''
ldscore_array = []
for i, fh in enumerate(flist):
y = ldscore(fh, num)
if i > 0:
if not series_eq(y.SNP, ldscore_array[0].SNP):
raise ValueError('LD Scores for concatenation must have identical SNP columns.')
else: # keep SNP column from only the first file
y = y.drop(['SNP'], axis=1)
new_col_dict = {c: c + '_' + str(i) for c in y.columns if c != 'SNP'}
y.rename(columns=new_col_dict, inplace=True)
ldscore_array.append(y)
return pd.concat(ldscore_array, axis=1)
def l2_parser(fh, compression):
'''Parse LD Score files'''
x = read_csv(fh, header=0, compression=compression)
if 'MAF' in x.columns and 'CM' in x.columns: # for backwards compatibility w/ v<1.0.0
x = x.drop(['MAF', 'CM'], axis=1)
return x
def annot_parser(fh, compression, frqfile_full=None, compression_frq=None):
'''Parse annot files'''
df_annot = read_csv(fh, header=0, compression=compression).drop(['CHR', 'BP', 'CM'], axis=1)
df_annot.iloc[:, 1:] = df_annot.iloc[:, 1:].astype(float)
if frqfile_full is not None:
df_frq = frq_parser(frqfile_full, compression_frq)
df_annot = df_annot[(.95 > df_frq.FRQ) & (df_frq.FRQ > 0.05)]
return df_annot
def frq_parser(fh, compression):
'''Parse frequency files.'''
df = read_csv(fh, header=0, compression=compression)
if 'MAF' in df.columns:
df.rename(columns={'MAF': 'FRQ'}, inplace=True)
return df[['SNP', 'FRQ']]
def ldscore(fh, num=None):
'''Parse .l2.ldscore files, split across num chromosomes. See docs/file_formats_ld.txt.'''
suffix = '.l2.ldscore'
if num is not None: # num files, e.g., one per chromosome
first_fh = sub_chr(fh, 1) + suffix
s, compression = which_compression(first_fh)
chr_ld = [l2_parser(sub_chr(fh, i) + suffix + s, compression) for i in xrange(1, num + 1)]
x = pd.concat(chr_ld) # automatically sorted by chromosome
else: # just one file
s, compression = which_compression(fh + suffix)
x = l2_parser(fh + suffix + s, compression)
x = x.sort(['CHR', 'BP']) # SEs will be wrong unless sorted
x = x.drop(['CHR', 'BP'], axis=1).drop_duplicates(subset='SNP')
return x
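# For example (hypothetical prefix): ldscore('baseline.@', num=22) reads
# baseline.1.l2.ldscore through baseline.22.l2.ldscore (optionally .gz or
# .bz2 compressed) and concatenates them, while ldscore('baseline') reads a
# single baseline.l2.ldscore file.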
def M(fh, num=None, N=2, common=False):
'''Parses .l{N}.M files, split across num chromosomes. See docs/file_formats_ld.txt.'''
parsefunc = lambda y: [float(z) for z in open(y, 'r').readline().split()]
suffix = '.l' + str(N) + '.M'
if common:
suffix += '_5_50'
if num is not None:
x = np.sum([parsefunc(sub_chr(fh, i) + suffix) for i in xrange(1, num + 1)], axis=0)
else:
x = parsefunc(fh + suffix)
return np.array(x).reshape((1, len(x)))
def M_fromlist(flist, num=None, N=2, common=False):
'''Read a list of .M* files and concatenate sideways.'''
return np.hstack([M(fh, num, N, common) for fh in flist])
def annot(fh_list, num=None, frqfile=None):
'''
Parses .annot files and returns an overlap matrix. See docs/file_formats_ld.txt.
If num is not None, parses .annot files split across [num] chromosomes (e.g., the
output of parallelizing ldsc.py --l2 across chromosomes).
'''
annot_suffix = ['.annot' for fh in fh_list]
annot_compression = []
if num is not None: # 22 files, one for each chromosome
for i, fh in enumerate(fh_list):
first_fh = sub_chr(fh, 1) + annot_suffix[i]
annot_s, annot_comp_single = which_compression(first_fh)
annot_suffix[i] += annot_s
annot_compression.append(annot_comp_single)
if frqfile is not None:
frq_suffix = '.frq'
first_frqfile = sub_chr(frqfile, 1) + frq_suffix
frq_s, frq_compression = which_compression(first_frqfile)
frq_suffix += frq_s
y = []
M_tot = 0
for chr in xrange(1, num + 1):
if frqfile is not None:
df_annot_chr_list = [annot_parser(sub_chr(fh, chr) + annot_suffix[i], annot_compression[i],
sub_chr(frqfile, chr) + frq_suffix, frq_compression)
for i, fh in enumerate(fh_list)]
else:
df_annot_chr_list = [annot_parser(sub_chr(fh, chr) + annot_suffix[i], annot_compression[i])
for i, fh in enumerate(fh_list)]
annot_matrix_chr_list = [np.matrix(df_annot_chr.ix[:, 1:]) for df_annot_chr in df_annot_chr_list]
annot_matrix_chr = np.hstack(annot_matrix_chr_list)
y.append(np.dot(annot_matrix_chr.T, annot_matrix_chr))
M_tot += len(df_annot_chr_list[0])
x = sum(y)
else: # just one file
for i, fh in enumerate(fh_list):
annot_s, annot_comp_single = which_compression(fh + annot_suffix[i])
annot_suffix[i] += annot_s
annot_compression.append(annot_comp_single)
if frqfile is not None:
frq_suffix = '.frq'
frq_s, frq_compression = which_compression(frqfile + frq_suffix)
frq_suffix += frq_s
df_annot_list = [annot_parser(fh + annot_suffix[i], annot_compression[i],
frqfile + frq_suffix, frq_compression) for i, fh in enumerate(fh_list)]
else:
df_annot_list = [annot_parser(fh + annot_suffix[i], annot_compression[i])
for i, fh in enumerate(fh_list)]
annot_matrix_list = [np.matrix(y.ix[:, 1:]) for y in df_annot_list]
annot_matrix = np.hstack(annot_matrix_list)
x = np.dot(annot_matrix.T, annot_matrix)
M_tot = len(df_annot_list[0])
return x, M_tot
def __ID_List_Factory__(colnames, keepcol, fname_end, header=None, usecols=None):
class IDContainer(object):
def __init__(self, fname):
self.__usecols__ = usecols
self.__colnames__ = colnames
self.__keepcol__ = keepcol
self.__fname_end__ = fname_end
self.__header__ = header
self.__read__(fname)
self.n = len(self.IDList)
def __read__(self, fname):
end = self.__fname_end__
if end and not fname.endswith(end):
raise ValueError('{f} filename must end in {f}'.format(f=end))
comp = get_compression(fname)
self.df = pd.read_csv(fname, header=self.__header__, usecols=self.__usecols__,
delim_whitespace=True, compression=comp)
if self.__colnames__:
self.df.columns = self.__colnames__
self.IDList = self.df.iloc[:, [self.__keepcol__]].astype('object')
def loj(self, externalDf):
'''Returns indices of those elements of self.IDList that appear in exernalDf.'''
r = externalDf.columns[0]
l = self.IDList.columns[0]
merge_df = externalDf.iloc[:, [0]]
merge_df['keep'] = True
z = pd.merge(self.IDList, merge_df, how='left', left_on=l, right_on=r,
sort=False)
ii = z['keep'] == True
return np.nonzero(ii)[0]
return IDContainer
PlinkBIMFile = __ID_List_Factory__(['CHR', 'SNP', 'CM', 'BP', 'A1', 'A2'], 1, '.bim', usecols=[0, 1, 2, 3, 4, 5])
PlinkFAMFile = __ID_List_Factory__(['IID'], 0, '.fam', usecols=[1])
FilterFile = __ID_List_Factory__(['ID'], 0, None, usecols=[0])
AnnotFile = __ID_List_Factory__(None, 2, None, header=0, usecols=None)
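# A minimal usage sketch (hypothetical filename): PlinkBIMFile('test.bim')
# parses the .bim file into .df, keeps the SNP column as .IDList and stores
# the number of rows in .n; .loj(other_df) returns the indices of IDList
# entries that also appear in the first column of other_df.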
|
gpl-3.0
|
wfgoh/wien2plot
|
wien2plot.py
|
1
|
4933
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import ast
from scipy import constants
import argparse
import os
def readene(fermi,filename) :
f = open(filename,'r')
f_inp = f.readlines()
f.close()
nbnd=maxbnd
ib=maxkpt
ene=np.zeros((maxbnd,maxkpt))
ik=0
klabel=[]
kindex=[]
label=[]
i = 0
for line in f_inp:
if line.isupper() or line.islower() :
break
i += 1
for line in f_inp[i:]:
if isinstance(ast.literal_eval(line.split()[0]),float):
if list(line.split()[2])[-1].isalpha():
for char in list(line.split()[2]):
if char.isalpha():
if char != 'E':
label.append(char)
klabel.append(''.join(label).replace('GAMMA','$\Gamma$'))
kindex.append(ik)
label=[]
ik+=1
nbnd=min(nbnd,ib)
ib=0
else:
ib+=1
ene[ib][ik]=(float(line.split()[1].replace('D','E'))-fermi)*constants.Rydberg*constants.h*constants.c/constants.eV
return ene, nbnd, ik, kindex, klabel
def plotene(ene,kpoint,nbnd,label,color) :
for ib in range(nbnd):
myplot = plt.plot(kpoint, ene[ib+1][kpoint+1], color)
myplot = plt.plot(kpoint, ene[1][kpoint+1], color, label=label)
return myplot
parser = argparse.ArgumentParser()
parser.add_argument('-n', choices=[1,2,3,4,5,6], default=2, type=int, help='specify number of bandplots (default: %(default)s)')
parser.add_argument('-f', metavar='FILENAME', nargs='+', default=[os.path.basename(os.getcwd())+'.energy', os.path.basename(os.getcwd())+'.energyso'], help='specify band energy filenames (default: %(default)s)')
parser.add_argument('-e', metavar='FERMIENERGY', nargs='+', default=[0.0, 0.0], type=float, help='specify Fermi energy (in Ry) in the order of the filenames given (default: %(default)s)')
parser.add_argument('-l', metavar='LEGEND', nargs='+', default=['Legend1', 'Legend2'], help='specify legend descriptions in the order of the filenames given (default: %(default)s)')
parser.add_argument('-r', metavar='ENERGY', nargs=2, default=[-2.0, 2.0], type=float, help='[Optional] specify energy range (default: %(default)s)')
parser.add_argument('-s', metavar='FERMILEVEL', nargs='+', default=0.0, type=float, help='[Optional] specify Fermi level (in eV) for plotting (default: %(default)s)')
parser.add_argument('-o', metavar='FILENAME', default='band.eps', help='[Optional] Specify output filenames (default: %(default)s)')
parser.add_argument('-b', metavar='INTEGER', default=200, type=int, help='[Optional] Specify an integer greater than the number of bands in case.energy (default: %(default)s)')
parser.add_argument('-k', metavar='INTEGER', default=800, type=int, help='[Optional] Specify an integer greater than the number of k-points in case.klist_band (default: %(default)s)')
#parser.add_argument('-i', action='store_true', help='[Optional] Read input from input file bandplot.inp')
args = parser.parse_args()
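# Example invocation (hypothetical case name and Fermi energies):
#   python wien2plot.py -n 2 -f case.energy case.energyso -e 0.5 0.5 \
#       -l LDA LDA+SO -o band.eps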
'''
if args.i :
f = open('bandplot.inp','r')
finp = f.readlines()
f.close()
fnam = []
efer = []
lege = []
args.n = ast.literal_eval(finp[0])
for i in range(args.n) :
fnam.append(finp[i+1].split()[0])
efer.append(ast.literal_eval(finp[i+1].split()[1]))
lege.append(finp[i+1].split()[2])
args.f = fnam
args.e = efer
args.l = lege
rang = [ast.literal_eval(finp[args.n+1].split()[0]),ast.literal_eval(finp[args.n+1].split()[1])]
args.r = rang
args.s = ast.literal_eval(finp[args.n+2])
args.o = finp[args.n+3]
args.b = ast.literal_eval(finp[args.n+4])
args.k = ast.literal_eval(finp[args.n+5])
'''
maxbnd = args.b
maxkpt = args.k
color = ['b-','g-','r-','c-', 'm-', 'y-']
for i in range(args.n):
ene,nbnd,nkpt,kindex,klabel = readene(args.e[i],args.f[i])
kpoint=np.arange(nkpt)
myplot = plotene(ene,kpoint,nbnd,args.l[i],color[i])
efermi = args.s #fermi*constants.Rydberg*constants.h*constants.c/constants.eV
emin = efermi + args.r[0]
emax = efermi + args.r[1]
kmin = kpoint[0]
kmax = kpoint[nkpt - 1]
matplotlib.rc('figure', figsize=(8.0, 6.0))
matplotlib.rc('axes', linewidth=1.5)
matplotlib.rc('lines', linewidth=1.5)
matplotlib.rc('font', size=18.0)
matplotlib.rc('xtick.major', size=0.0, pad=8.0)
matplotlib.rc('xtick.minor', size=0.0, pad=8.0)
matplotlib.rc('ytick.major', size=6.0, pad=8.0)
matplotlib.rc('ytick.minor', size=3.0, pad=8.0)
kvalue = []
for i in range(len(kindex)):
kvalue.append(kpoint[kindex[i]])
myxticks = plt.xticks(kvalue, klabel)
for i in range(len(kindex)):
if kvalue[i] != kmin and kvalue[i] != kmax:
myplot = plt.plot([kvalue[i], kvalue[i]], [emin, emax], 'k-')
myplot = plt.plot([kmin, kmax], [efermi, efermi], 'k-')
mygca = plt.gca()
for i in mygca.get_xticklines() + mygca.get_yticklines():
i.set_markeredgewidth(1.5)
myaxis = plt.axis([kmin, kmax, emin, emax])
myxlabel = plt.xlabel('Wavevector')
myylabel = plt.ylabel('Energy (eV)')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3, mode="expand", borderaxespad=0.)
plt.savefig(args.o, bbox_inches='tight')
|
mit
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/matplotlib/tests/test_backend_pgf.py
|
7
|
6033
|
# -*- encoding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os
import shutil
import numpy as np
import nose
from nose.plugins.skip import SkipTest
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.compat import subprocess
from matplotlib.testing.compare import compare_images, ImageComparisonFailure
from matplotlib.testing.decorators import _image_directories, switch_backend
baseline_dir, result_dir = _image_directories(lambda: 'dummy func')
def check_for(texsystem):
header = """
\\documentclass{minimal}
\\usepackage{pgf}
\\begin{document}
\\typeout{pgfversion=\\pgfversion}
\\makeatletter
\\@@end
"""
try:
latex = subprocess.Popen(["xelatex", "-halt-on-error"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = latex.communicate(header.encode("utf8"))
except OSError:
return False
return latex.returncode == 0
def compare_figure(fname, savefig_kwargs={}):
actual = os.path.join(result_dir, fname)
plt.savefig(actual, **savefig_kwargs)
expected = os.path.join(result_dir, "expected_%s" % fname)
shutil.copyfile(os.path.join(baseline_dir, fname), expected)
err = compare_images(expected, actual, tol=14)
if err:
raise ImageComparisonFailure('images not close: %s vs. '
'%s' % (actual, expected))
def create_figure():
plt.figure()
x = np.linspace(0, 1, 15)
# line plot
plt.plot(x, x ** 2, "b-")
# marker
plt.plot(x, 1 - x**2, "g>")
# filled paths and patterns
plt.fill_between([0., .4], [.4, 0.], hatch='//', facecolor="lightgray",
edgecolor="red")
plt.fill([3, 3, .8, .8, 3], [2, -2, -2, 0, 2], "b")
# text and typesetting
plt.plot([0.9], [0.5], "ro", markersize=3)
plt.text(0.9, 0.5, 'unicode (ü, °, µ) and math ($\\mu_i = x_i^2$)',
ha='right', fontsize=20)
plt.ylabel('sans-serif, blue, $\\frac{\\sqrt{x}}{y^2}$..',
family='sans-serif', color='blue')
plt.xlim(0, 1)
plt.ylim(0, 1)
# test compiling a figure to pdf with xelatex
@switch_backend('pgf')
def test_xelatex():
if not check_for('xelatex'):
raise SkipTest('xelatex + pgf is required')
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
create_figure()
compare_figure('pgf_xelatex.pdf')
# test compiling a figure to pdf with pdflatex
@switch_backend('pgf')
def test_pdflatex():
if not check_for('pdflatex'):
raise SkipTest('pdflatex + pgf is required')
rc_pdflatex = {'font.family': 'serif',
'pgf.rcfonts': False,
'pgf.texsystem': 'pdflatex',
'pgf.preamble': ['\\usepackage[utf8x]{inputenc}',
'\\usepackage[T1]{fontenc}']}
mpl.rcParams.update(rc_pdflatex)
create_figure()
compare_figure('pgf_pdflatex.pdf')
# test updating the rc parameters for each figure
@switch_backend('pgf')
def test_rcupdate():
if not check_for('xelatex') or not check_for('pdflatex'):
raise SkipTest('xelatex and pdflatex + pgf required')
rc_sets = []
rc_sets.append({'font.family': 'sans-serif',
'font.size': 30,
'figure.subplot.left': .2,
'lines.markersize': 10,
'pgf.rcfonts': False,
'pgf.texsystem': 'xelatex'})
rc_sets.append({'font.family': 'monospace',
'font.size': 10,
'figure.subplot.left': .1,
'lines.markersize': 20,
'pgf.rcfonts': False,
'pgf.texsystem': 'pdflatex',
'pgf.preamble': ['\\usepackage[utf8x]{inputenc}',
'\\usepackage[T1]{fontenc}',
'\\usepackage{sfmath}']})
for i, rc_set in enumerate(rc_sets):
mpl.rcParams.update(rc_set)
create_figure()
compare_figure('pgf_rcupdate%d.pdf' % (i + 1))
# test backend-side clipping, since large numbers are not supported by TeX
@switch_backend('pgf')
def test_pathclip():
if not check_for('xelatex'):
raise SkipTest('xelatex + pgf is required')
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
plt.figure()
plt.plot([0., 1e100], [0., 1e100])
plt.xlim(0, 1)
plt.ylim(0, 1)
# this test passes if compiling/saving to pdf works (no image comparison)
plt.savefig(os.path.join(result_dir, "pgf_pathclip.pdf"))
# test mixed mode rendering
@switch_backend('pgf')
def test_mixedmode():
if not check_for('xelatex'):
raise SkipTest('xelatex + pgf is required')
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
Y, X = np.ogrid[-1:1:40j, -1:1:40j]
plt.figure()
plt.pcolor(X**2 + Y**2).set_rasterized(True)
compare_figure('pgf_mixedmode.pdf')
# test bbox_inches clipping
@switch_backend('pgf')
def test_bbox_inches():
if not check_for('xelatex'):
raise SkipTest('xelatex + pgf is required')
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
Y, X = np.ogrid[-1:1:40j, -1:1:40j]
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax1.plot(range(5))
ax2 = fig.add_subplot(122)
ax2.plot(range(5))
plt.tight_layout()
bbox = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
compare_figure('pgf_bbox_inches.pdf', savefig_kwargs={'bbox_inches': bbox})
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
mit
|
rdjdejong/Leren2016
|
kNN1.py
|
1
|
2525
|
from __future__ import division
import numpy as np
import pandas as pd
# Finds the most common item
# taken from http://stackoverflow.com/questions/1518522/python-most-common-element-in-a-list
def most_common(lst):
return max(set(lst), key=lst.count)
# simply determines the accuracy of our predicted outputs
def accuracy(lst):
correct = 0
for pair in lst:
if pair[0] == pair[1]:
correct += 1
return correct/len(lst)
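# For example (hypothetical values): most_common([1, 2, 2, 3]) returns 2 and
# accuracy([(1, 1), (2, 3)]) returns 0.5.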
class kNN:
''' K-Nearest Neighbor
* In this class, we implemented the KNN-algorithm
'''
test_data = None
train_data = None
# read the data
def __init__(self, fileNameTrain, fileNameTest):
self.train_data = pd.read_csv(fileNameTrain, header = None, sep = ';')\
.values
self.test_data = pd.read_csv(fileNameTest, header = None, sep = ';')\
.values
    # classifies either a single test example (selected by index) or, when
    # test_case < 0, every example in the test set
def find_neighbors(self, k, test_case = -1):
if test_case < 0:
outputs = []
for i in range(len(self.test_data[:,0])):
predicted = self.find_neighbor(k, self.test_data[i])
outputs.append((predicted, self.test_data[i, -1]))
return outputs
else:
return self.find_neighbor(k, self.test_data[test_case])
# find the neighbors
def find_neighbor(self, k, test_case):
# calculate the distance
distance = np.power\
((self.train_data[:,:-1] - test_case[:-1]), 2)
distance = np.sum(distance, axis=1)
distance = np.sqrt(distance)
        votes = []
        # find the K-Nearest neighbors; mask visited entries with infinity so
        # that indices keep lining up with the rows of train_data
        for i in range(k):
            min_index = np.argmin(distance)
            min_vote = self.train_data[min_index, -1]
            votes.append(min_vote.item())
            distance[min_index] = np.inf
        return most_common(votes)
if __name__ == '__main__':
kNN_trainer = kNN('digist123-1.csv', 'digist123-2.csv')
print accuracy(kNN_trainer.find_neighbors(1))
print accuracy(kNN_trainer.find_neighbors(2))
print accuracy(kNN_trainer.find_neighbors(3))
print accuracy(kNN_trainer.find_neighbors(4))
print accuracy(kNN_trainer.find_neighbors(5))
print accuracy(kNN_trainer.find_neighbors(6))
print accuracy(kNN_trainer.find_neighbors(7))
print accuracy(kNN_trainer.find_neighbors(8))
print accuracy(kNN_trainer.find_neighbors(9))
print accuracy(kNN_trainer.find_neighbors(10))
|
apache-2.0
|