repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---
MSeifert04/astropy
|
examples/io/plot_fits-image.py
|
11
|
1898
|
# -*- coding: utf-8 -*-
"""
=======================================
Read and plot an image from a FITS file
=======================================
This example opens an image stored in a FITS file and displays it to the screen.
It uses `astropy.utils.data` to download the file, `astropy.io.fits` to open
the file, and `matplotlib.pyplot` to display the image.
*By: Lia R. Corrales, Adrian Price-Whelan, Kelle Cruz*
*License: BSD*
"""
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Download the example FITS files used by this example:
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
image_file = get_pkg_data_filename('tutorials/FITS-images/HorseHead.fits')
##############################################################################
# Use `astropy.io.fits.info()` to display the structure of the file:
fits.info(image_file)
##############################################################################
# Generally the image information is located in the Primary HDU, also known
# as extension 0. Here, we use `astropy.io.fits.getdata()` to read the image
# data from this first extension using the keyword argument ``ext=0``:
image_data = fits.getdata(image_file, ext=0)
##############################################################################
# The data is now stored as a 2D numpy array. Print the dimensions using the
# shape attribute:
print(image_data.shape)
##############################################################################
# Display the image data:
plt.figure()
plt.imshow(image_data, cmap='gray')
plt.colorbar()
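##############################################################################
# Optional extra (not part of the original example): the header of the same
# HDU can be read with `astropy.io.fits.getheader()` to inspect metadata such
# as the image dimensions stored in the standard ``NAXIS1``/``NAXIS2`` cards:
header = fits.getheader(image_file, ext=0)
print(repr(header))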
|
bsd-3-clause
|
ningchi/scikit-learn
|
sklearn/metrics/cluster/supervised.py
|
21
|
26876
|
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
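# Illustrative doctest-style sketch (added; not part of the original module):
# for two toy labelings, the contingency matrix counts co-occurrences of true
# classes (rows) and predicted clusters (columns).
#
#     >>> contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
#     array([[1, 1],
#            [0, 2]])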
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari: float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even when the label values are permuted::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all members of a class to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
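# Worked example (added for illustration; not part of the original module):
# for labels_true = [0, 0, 1, 2] and labels_pred = [0, 0, 1, 1] the
# contingency matrix is [[2, 0], [0, 1], [0, 1]], hence
#     sum_comb_c = comb2(2) + comb2(1) + comb2(1) = 1
#     sum_comb_k = comb2(2) + comb2(2)            = 2
#     sum_comb   = comb2(2)                       = 1
#     prod_comb  = 1 * 2 / comb(4, 2) = 1/3
#     mean_comb  = (1 + 2) / 2        = 1.5
#     ARI        = (1 - 1/3) / (1.5 - 1/3) = 4/7 = 0.571...
# which matches the 0.57... doctest value in the docstring above.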
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``labels_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``labels_true`` with ``labels_pred``
will return the :func:`completeness_score` which will be different in
general.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``labels_true`` with ``labels_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all members of a class to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous, but unnecessary splits harm completeness
and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
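# Quick numerical sketch (added; not part of the original module): two
# identical binary labelings share all of their information, so the MI equals
# the entropy of either labeling, i.e. log(2) in nats.
#
#     >>> mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])  # doctest: +ELLIPSIS
#     0.69...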
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float (upper bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average and can therefore be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
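# Illustrative check (added; not part of the original module): a balanced
# binary labeling has the maximal entropy log(2) in nats.
#
#     >>> entropy([0, 0, 1, 1])  # doctest: +ELLIPSIS
#     0.69...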
|
bsd-3-clause
|
bobflagg/deepER
|
deeper/nerwindow.py
|
1
|
7047
|
from numpy import *
from nn.base import NNBase
from nn.math import softmax, make_onehot
from misc import random_weight_matrix
##
# Evaluation code; do not change this
##
from sklearn import metrics
def full_report(y_true, y_pred, tagnames):
cr = metrics.classification_report(y_true, y_pred, target_names=tagnames)
print(cr)
def eval_performance(y_true, y_pred, tagnames):
pre, rec, f1, support = metrics.precision_recall_fscore_support(y_true, y_pred)
print "=== Performance (omitting 'O' class) ==="
print "Mean precision: %.02f%%" % (100*sum(pre[1:] * support[1:])/sum(support[1:]))
print "Mean recall: %.02f%%" % (100*sum(rec[1:] * support[1:])/sum(support[1:]))
print "Mean F1: %.02f%%" % (100*sum(f1[1:] * support[1:])/sum(support[1:]))
def compute_f1(y_true, y_pred, tagnames):
_, _, f1, support = metrics.precision_recall_fscore_support(y_true, y_pred)
return 100*sum(f1[1:] * support[1:])/sum(support[1:])
##
# Implement this!
##
class WindowMLP(NNBase):
"""Single hidden layer, plus representation learning."""
def __init__(self, wv, windowsize=3, dims=[None, 100, 5], reg=0.001, alpha=0.01, rseed=10):
"""
Initialize classifier model.
Arguments:
wv : initial word vectors (array |V| x n)
note that this is the transpose of the n x |V| matrix L
described in the handout; you'll want to keep it in
this |V| x n form for efficiency reasons, since numpy
stores matrix rows contiguously.
windowsize : int, size of context window
dims : dimensions of [input, hidden, output]
input dimension can be computed from wv.shape
reg : regularization strength (lambda)
alpha : default learning rate
rseed : random initialization seed
"""
# Set regularization
self.lreg = float(reg)
self.alpha = alpha # default training rate
self.nclass = dims[2] # number of output classes
self.windowsize = windowsize # size of context window
self.n = wv.shape[1] # dimension of word vectors
dims[0] = windowsize * wv.shape[1] # input dimension
param_dims = dict(
W=(dims[1], dims[0]),
b1=(dims[1],),
U=(dims[2], dims[1]),
b2=(dims[2],),
)
param_dims_sparse = dict(L=wv.shape)
# initialize parameters: don't change this line
NNBase.__init__(self, param_dims, param_dims_sparse)
random.seed(rseed) # be sure to seed this for repeatability!
#### YOUR CODE HERE ####
self.sparams.L = wv.copy() # store own representations
self.params.W = random_weight_matrix(*self.params.W.shape)
self.params.U = random_weight_matrix(*self.params.U.shape)
# self.params.b1 = zeros((dims[1],1)) # done automatically!
# self.params.b2 = zeros((self.nclass,1)) # done automatically!
#### END YOUR CODE ####
def _acc_grads(self, window, label):
"""
Accumulate gradients, given a training point
(window, label) of the format
window = [x_{i-1} x_{i} x_{i+1}] # three ints
label = {0,1,2,3,4} # single int, gives class
Your code should update self.grads and self.sgrads,
in order for gradient_check and training to work.
So, for example:
self.grads.U += (your gradient dJ/dU)
self.sgrads.L[i] = (gradient dJ/dL[i]) # this adds an update for that index
"""
#### YOUR CODE HERE ####
##
# Forward propagation
# build input context
x = self.build_input_context(window)
# first hidden layer
z1 = self.params.W.dot(x) + self.params.b1
a1 = tanh(z1)
# second hidden layer
z2 = self.params.U.dot(a1) + self.params.b2
a2 = softmax(z2)
##
# Backpropagation
# second hidden layer
delta2 = a2 - make_onehot(label, self.nclass)
self.grads.b2 += delta2
self.grads.U += outer(delta2, a1) + self.lreg * self.params.U
# first hidden layer
delta1 = (1.0 - a1**2) * self.params.U.T.dot(delta2)
self.grads.b1 += delta1
self.grads.W += outer(delta1, x) + self.lreg * self.params.W
for j, idx in enumerate(window):
start = j * self.n
stop = (j + 1) * self.n
self.sgrads.L[idx] = self.params.W[:,start:stop].T.dot(delta1)
#### END YOUR CODE ####
def build_input_context(self, window):
x = zeros((self.windowsize * self.n,))
for j, idx in enumerate(window):
start = j * self.n
stop = (j + 1) * self.n
x[start:stop] = self.sparams.L[idx]
return x
def predict_proba(self, windows):
"""
Predict class probabilities.
Should return a matrix P of probabilities,
with each row corresponding to a row of X.
windows = array (n x windowsize),
each row is a window of indices
"""
# handle singleton input by making sure we have
# a list-of-lists
if not hasattr(windows[0], "__iter__"):
windows = [windows]
#### YOUR CODE HERE ####
# doing this first as a loop
n_windows = len(windows)
P = zeros((n_windows,self.nclass))
for i in range(n_windows):
x = self.build_input_context(windows[i])
# first hidden layer
z1 = self.params.W.dot(x) + self.params.b1
a1 = tanh(z1)
# second hidden layer
z2 = self.params.U.dot(a1) + self.params.b2
P[i,:] = softmax(z2)
'''
x = np.zeros((n_windows,self.windowsize * self.n))
for i in range(n):
x[i,:] = self.build_input_context(window[i])
# first hidden layer
z1 = self.params.W.dot(x) + self.params.b1
a1 = np.tanh(z1)
# second hidden layer
z2 = self.params.U.dot(a1) + self.params.b2
a2 = softmax(z2)
'''
#### END YOUR CODE ####
return P # rows are output for each input
def predict(self, windows):
"""
Predict most likely class.
Returns a list of predicted class indices;
input is same as to predict_proba
"""
#### YOUR CODE HERE ####
P = self.predict_proba(windows)
c = argmax(P, axis=1)
#### END YOUR CODE ####
return c # list of predicted classes
def compute_loss(self, windows, labels):
"""
Compute the loss for a given dataset.
windows = same as for predict_proba
labels = list of class labels, for each row of windows
"""
#### YOUR CODE HERE ####
P = self.predict_proba(windows)
N = P.shape[0]
J = -1.0 * sum(log(P[range(N),labels]))
J += (self.lreg / 2.0) * (sum(self.params.W**2.0) + sum(self.params.U**2.0))
#### END YOUR CODE ####
return J
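##
# Minimal usage sketch (added for illustration; the vocabulary size and the
# window indices below are hypothetical -- the real word vectors come from the
# surrounding assignment code, which is not shown here):
#
#     wv = random.randn(1000, 50)               # |V| x n random word vectors
#     clf = WindowMLP(wv, windowsize=3, dims=[None, 100, 5])
#     probs = clf.predict_proba([12, 45, 3])    # class probabilities, one window
#     label = clf.predict([12, 45, 3])          # most likely class index
##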
|
apache-2.0
|
konstantint/matplotlib-venn
|
matplotlib_venn/_util.py
|
1
|
2831
|
'''
Venn diagram plotting routines.
Utility routines
Copyright 2012, Konstantin Tretyakov.
http://kt.era.ee/
Licensed under MIT license.
'''
from matplotlib_venn._venn2 import venn2, compute_venn2_subsets
from matplotlib_venn._venn3 import venn3, compute_venn3_subsets
def venn2_unweighted(subsets, set_labels=('A', 'B'), set_colors=('r', 'g'), alpha=0.4, normalize_to=1.0, subset_areas=(1, 1, 1), ax=None, subset_label_formatter=None):
'''
The version of venn2 without area-weighting.
It is implemented as a wrapper around venn2. Namely, venn2 is invoked as usual, but with all subset areas
set to 1. The subset labels are then replaced in the resulting diagram with the provided subset sizes.
The parameters are all the same as those of venn2.
In addition there is a subset_areas parameter, which specifies the actual subset areas
(it is (1, 1, 1) by default; you are free to change it, within reason).
'''
v = venn2(subset_areas, set_labels, set_colors, alpha, normalize_to, ax)
# Now rename the labels
if subset_label_formatter is None:
subset_label_formatter = str
subset_ids = ['10', '01', '11']
if isinstance(subsets, dict):
subsets = [subsets.get(t, 0) for t in subset_ids]
elif len(subsets) == 2:
subsets = compute_venn2_subsets(*subsets)
for n, id in enumerate(subset_ids):
lbl = v.get_label_by_id(id)
if lbl is not None:
lbl.set_text(subset_label_formatter(subsets[n]))
return v
def venn3_unweighted(subsets, set_labels=('A', 'B', 'C'), set_colors=('r', 'g', 'b'), alpha=0.4, normalize_to=1.0, subset_areas=(1, 1, 1, 1, 1, 1, 1), ax=None, subset_label_formatter=None):
'''
The version of venn3 without area-weighting.
It is implemented as a wrapper around venn3. Namely, venn3 is invoked as usual, but with all subset areas
set to 1. The subset labels are then replaced in the resulting diagram with the provided subset sizes.
The parameters are all the same as those of venn3.
In addition there is a subset_areas parameter, which specifies the actual subset areas
(it is (1, 1, 1, 1, 1, 1, 1) by default; you are free to change it, within reason).
'''
v = venn3(subset_areas, set_labels, set_colors, alpha, normalize_to, ax)
# Now rename the labels
if subset_label_formatter is None:
subset_label_formatter = str
subset_ids = ['100', '010', '110', '001', '101', '011', '111']
if isinstance(subsets, dict):
subsets = [subsets.get(t, 0) for t in subset_ids]
elif len(subsets) == 3:
subsets = compute_venn3_subsets(*subsets)
for n, id in enumerate(subset_ids):
lbl = v.get_label_by_id(id)
if lbl is not None:
lbl.set_text(subset_label_formatter(subsets[n]))
return v
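# Usage sketch (added for illustration; assumes an interactive matplotlib
# backend is available):
#
#     from matplotlib_venn._util import venn2_unweighted
#     import matplotlib.pyplot as plt
#     venn2_unweighted((10, 5, 3))  # subset sizes for (A only, B only, A and B)
#     plt.show()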
|
mit
|
rjurga/plasmon-fluorescence
|
data_processing.py
|
1
|
9749
|
import numpy as np
from scipy import constants
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import computations
def processing(params, materials, geometry, dipole, distance, emission):
"""Convert parameters, get decay rates, save and plot results."""
r = convert_units(geometry['radius'], geometry['unit'])
d_orig = np.linspace(distance['min'], distance['max'], num=distance['n'])
d = convert_units(d_orig, distance['unit'])
em_orig = np.linspace(emission['min'], emission['max'], num=emission['n'])
omega = convert_emission_to_omega(em_orig, emission['label'])
materials['omega_p'] = convert_eV_to_Hz(materials['hbar omega_p'])
materials['gamma'] = convert_eV_to_Hz(materials['hbar gamma'])
eps_metal = permittivity(omega, materials)
eps_inf = bound_response(omega, eps_metal, materials)
gamma_tot, gamma_r = computations.decay_rates_vectorized(params['n_max'], materials['nonlocal'], materials['eps_medium'], eps_metal, eps_inf, materials['omega_p'], materials['gamma'], materials['v_F'], materials['D'], omega, r, d, dipole['orientation'])
gamma_nr = computations.nonradiative_decay_rate(gamma_tot, gamma_r)
q = computations.quantum_efficiency(gamma_tot, gamma_r, dipole['q_0'])
if params['save results']:
save_data(d_orig, distance['unit'], em_orig, emission['label'], gamma_tot, gamma_r, gamma_nr, q)
if params['show results']:
make_plot(d_orig, distance['n'], distance['unit'],
em_orig, emission['n'], emission['label'],
gamma_tot, gamma_r, gamma_nr, q)
def convergence(params, materials, geometry, dipole, distance, emission):
"""Plot decay rates as a function of the max angular mode order."""
r = convert_units(geometry['radius'], geometry['unit'])
d = convert_units(np.array([distance['min']]), distance['unit'])
omega = convert_emission_to_omega(np.array([emission['min']]), emission['label'])
materials['omega_p'] = convert_eV_to_Hz(materials['hbar omega_p'])
materials['gamma'] = convert_eV_to_Hz(materials['hbar gamma'])
eps_metal = permittivity(omega, materials)
eps_inf = bound_response(omega, eps_metal, materials)
gamma_tot = np.empty(params['n_max'])
gamma_r = np.empty(params['n_max'])
for i, n in enumerate(range(1, params['n_max']+1)):
gamma_tot[i], gamma_r[i] = computations.decay_rates_vectorized(n, materials['nonlocal'], materials['eps_medium'], eps_metal, eps_inf, materials['omega_p'], materials['gamma'], materials['v_F'], materials['D'], omega, r, d, dipole['orientation'])
plot_params = (
(gamma_tot, r'$\gamma_\mathrm{sp} / \gamma_0$', 'linear'),
(gamma_r, r'$\gamma_\mathrm{r} / \gamma_0$', 'linear'),
)
make_1d_plot(range(1, params['n_max']+1), r'$n_\mathrm{max}$', plot_params, style='.')
def convert_units(x, x_unit):
"""Return length converted to metres."""
factors = {'m': 1e0,
'cm': 1e-2,
'mm': 1e-3,
'um': 1e-6,
'nm': 1e-9,
'A': 1e-10}
return x * factors[x_unit]
def convert_emission_to_omega(x, x_label):
"""Convert emission parameter to radian per second and return omega."""
if x_label == 'omega':
result = x
elif x_label == 'hbar omega (J)':
result = x / constants.hbar
elif x_label == 'hbar omega (eV)':
result = convert_eV_to_Hz(x)
elif x_label == 'frequency (Hz)':
result = 2.0 * constants.pi * x
elif x_label == 'wavelength (m)':
result = 2.0 * constants.pi * constants.c / x
elif x_label == 'wavelength (nm)':
result = 2.0 * constants.pi * constants.c / (x*1.0e-9)
else:
result = np.nan
return result
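# Quick sanity check (added for illustration; not part of the original code):
# a 500 nm emission wavelength corresponds to
#     omega = 2 * pi * c / (500e-9 m) = 3.77e15 rad/s (approximately),
# i.e. convert_emission_to_omega(np.array([500.0]), 'wavelength (nm)')
# returns roughly [3.77e15].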
def permittivity(omega, materials):
"""Return the permittivity at omega for the specified metal."""
params_Olmon_Yang = {
# 'metal': (path to file, column delimiter, rows to skip),
'Olmon evaporated gold': ('Metals/Olmon_PRB2012_EV.dat', None, 2),
'Olmon template-stripped gold': ('Metals/Olmon_PRB2012_TS.dat', None, 2),
'Olmon single-crystal gold': ('Metals/Olmon_PRB2012_SC.dat', None, 2),
'Yang silver': ('Metals/Ag_C_corrected.csv', ',', 1),
}
if materials['metal'] == 'Drude':
eps = materials['eps_inf']
eps += free_response(omega, materials['omega_p'], materials['gamma'])
elif materials['metal'] in params_Olmon_Yang.keys():
fname, d, s = params_Olmon_Yang[materials['metal']]
data = np.loadtxt(fname, delimiter=d, skiprows=s, usecols=(0,2,3))
# flip the row order so that omega is increasing
data = np.flipud(data)
eps = interp_permittivity(omega, data)
# USER IMPLEMENTED PERMITTIVITY
# fill and uncomment the block below
# make sure to convert the frequency to the proper units
# (omega should be in Hz, you can use convert_emission_to_omega)
# make sure to properly order the data for interpolation
# (omega should be increasing)
# elif materials['metal'] == '':
# fname = 'Metals/'
# d = None
# s = 0
# cols = (0,1,2)
# data = np.loadtxt(fname, delimiter=d, skiprows=s, usecols=cols)
# eps = interp_permittivity(omega, data)
else:
eps = np.nan
return eps
def interp_permittivity(omega, data):
omega_data = convert_eV_to_Hz(data[:, 0])
re_eps = np.interp(omega, omega_data, data[:, 1], left=np.nan, right=np.nan)
im_eps = np.interp(omega, omega_data, data[:, 2], left=np.nan, right=np.nan)
return re_eps + 1j*im_eps
def bound_response(omega, eps_metal, materials):
"""Return the bound response at omega."""
eps_inf = np.copy(eps_metal)
eps_inf -= free_response(omega, materials['omega_p'], materials['gamma'])
return eps_inf
def free_response(omega, omega_p, gamma):
"""Return the Drude free electrons response at omega."""
return - np.square(omega_p)/(omega*(omega + 1j*gamma))
def convert_eV_to_Hz(x_eV):
"""Return input converted from eV to Hz."""
return x_eV / constants.hbar * constants.eV
def save_data(distance, distance_unit, emission, emission_label, gamma_tot, gamma_r, gamma_nr, q):
"""Save the decay rates and quantum efficiency in results.txt."""
distance_grid, emission_grid = np.meshgrid(distance, emission)
# build a list (np.stack requires a sequence, not a map iterator)
X = list(map(np.ravel, (distance_grid, emission_grid, gamma_tot, gamma_r, gamma_nr, q)))
columns = ('distance (' + distance_unit + ')',
emission_label,
'normalized total decay rate',
'normalized radiative decay rate',
'normalized nonradiative decay rate',
'quantum efficiency')
np.savetxt('results.txt', np.stack(X, axis=1), header=', '.join(columns))
def make_plot(distance, distance_n, distance_unit,
emission, emission_n, emission_label,
gamma_tot, gamma_r, gamma_nr, q):
"""Plot the decay rates and quantum efficiency."""
labels = {'omega': r'$\omega$',
'hbar omega (J)': r'$\hbar \omega$',
'hbar omega (eV)': r'$\hbar \omega$ (eV)',
'frequency (Hz)': r'$\nu$',
'wavelength (m)': r'$\lambda$',
'wavelength (nm)': r'$\lambda$ (nm)',
'gamma_sp': r'$\gamma_\mathrm{sp} / \gamma_0$',
'gamma_r': r'$\gamma_\mathrm{r} / \gamma_0$',
'gamma_nr': r'$\gamma_\mathrm{nr} / \gamma_0$',
'q': r'$q$'}
plot_params = (
(gamma_tot, labels['gamma_sp'], 'log'),
(gamma_r, labels['gamma_r'], 'log'),
(gamma_nr, labels['gamma_nr'], 'log'),
(q, labels['q'], 'linear')
)
if distance_n > 1 and emission_n > 1:
x_label = 'distance (' + distance_unit + ')'
y_label = labels[emission_label]
make_2d_plot(distance, x_label, emission, y_label, plot_params)
else:
if emission_n == 1:
x = distance
x_label = 'distance (' + distance_unit + ')'
else:
x = emission
x_label = labels[emission_label]
make_1d_plot(x, x_label, plot_params)
def make_2d_plot(x, x_label, y, y_label, plot_params):
"""Make a 2d map plot of the decay rates and quantum efficiency."""
plt.figure(figsize=(4*len(plot_params), 3))
X, Y = np.meshgrid(x, y)
for i, (Z, Z_label, Z_scale) in enumerate(plot_params, start=1):
if Z_scale == 'log':
Z_norm = LogNorm(vmin=Z.min(), vmax=Z.max())
else:
Z_norm=None
plt.subplot(1, len(plot_params), i)
plt.imshow(Z, aspect='auto', interpolation='bilinear',
norm=Z_norm, origin='lower',
extent=[X.min(), X.max(), Y.min(), Y.max()])
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(Z_label)
plt.colorbar()
plt.tight_layout()
plt.show()
plt.close()
def make_1d_plot(x, x_label, plot_params, style='-'):
"""Make a 1d plot of the decay rates and quantum efficiency."""
plt.figure(figsize=(4*len(plot_params), 3))
for i, (y, y_label, y_scale) in enumerate(plot_params, start=1):
plt.subplot(1, len(plot_params), i)
plt.plot(x, np.ravel(y), style)
plt.xlabel(x_label)
plt.xlim(x[0], x[-1])
plt.ylabel(y_label)
plt.yscale(y_scale)
plt.tight_layout()
plt.show()
plt.close()
if __name__ == "__main__":
from parameters import *
if params['save results'] or params['show results']:
processing(params, materials, geometry, dipole, distance, emission)
if params['show convergence']:
convergence(params, materials, geometry, dipole, distance, emission)
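# Illustrative sketch of the dictionaries expected from parameters.py (added;
# the keys mirror the code above, but the values are hypothetical placeholders
# and not the repository's actual parameter file):
#
#     params = {'n_max': 50, 'save results': False,
#               'show results': True, 'show convergence': False}
#     materials = {'metal': 'Drude', 'eps_inf': 1.0, 'eps_medium': 1.0,
#                  'hbar omega_p': 9.0, 'hbar gamma': 0.07,
#                  'nonlocal': False, 'v_F': 1.4e6, 'D': 0.0}
#     geometry = {'radius': 20.0, 'unit': 'nm'}
#     dipole = {'orientation': 'radial', 'q_0': 1.0}
#     distance = {'min': 1.0, 'max': 20.0, 'n': 50, 'unit': 'nm'}
#     emission = {'min': 400.0, 'max': 800.0, 'n': 50, 'label': 'wavelength (nm)'}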
|
mit
|
elkingtonmcb/scikit-learn
|
sklearn/tests/test_multiclass.py
|
136
|
23649
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
# Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels, because there are only 3 distinct class pairs and thus
        # 3 distinct binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
    # Test that ties can also be won by labels other than the first two
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
|
bsd-3-clause
|
mne-tools/mne-tools.github.io
|
0.12/_downloads/plot_forward_sensitivity_maps.py
|
12
|
2425
|
"""
================================================
Display sensitivity maps for EEG and MEG sensors
================================================
Sensitivity maps can be produced from forward operators that
indicate how well different sensor types will be able to detect
neural currents from different regions of the brain.
To get started with forward modeling see :ref:`tut_forward`.
"""
# Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path + '/subjects'
# Read the forward solutions with surface orientation
fwd = mne.read_forward_solution(fwd_fname, surf_ori=True)
leadfield = fwd['sol']['data']
print("Leadfield size : %d x %d" % leadfield.shape)
###############################################################################
# Compute sensitivity maps
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
###############################################################################
# Show gain matrix a.k.a. leadfield matrix with sensitivity map
picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
cmap='RdBu_r')
ax.set_title(ch_type.upper())
ax.set_xlabel('sources')
ax.set_ylabel('sensors')
plt.colorbar(im, ax=ax, cmap='RdBu_r')
plt.show()
plt.figure()
plt.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
color=['c', 'b', 'k'])
plt.legend()
plt.title('Normal orientation sensitivity')
plt.xlabel('sensitivity')
plt.ylabel('count')
plt.show()
grad_map.plot(time_label='Gradiometer sensitivity', subjects_dir=subjects_dir,
clim=dict(lims=[0, 50, 100]))
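# The same visualization applies to the other channel types; a small optional
# addition (not part of the original example) that reuses the plot() call shown
# above for grad_map:
mag_map.plot(time_label='Magnetometer sensitivity', subjects_dir=subjects_dir,
             clim=dict(lims=[0, 50, 100]))
eeg_map.plot(time_label='EEG sensitivity', subjects_dir=subjects_dir,
             clim=dict(lims=[0, 50, 100]))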
|
bsd-3-clause
|
boknilev/dsl-char-cnn
|
src/cnn_multifilter.py
|
1
|
6212
|
'''Character CNN code for DSL 2016 task 2
Partly based on:
https://github.com/fchollet/keras/blob/master/examples/imdb_cnn.py
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
import tensorflow as tf
tf.set_random_seed(1337) # probably not needed
from keras.preprocessing import sequence
from keras.models import Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from keras.layers import Embedding, merge
from keras.layers import Convolution1D, MaxPooling1D
#from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.utils import np_utils
#from keras.regularizers import l1, l2, l1l2, activity_l1, activity_l2, activity_l1l2
#from keras.layers.normalization import BatchNormalization
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from data import load_data, load_labels, alphabet, train_file, test_file, labels_file
# limit tensorflow memory usage
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
set_session(tf.Session(config=config))
# set tensorflow random seed for reproducibility
# model file
model_file = "cnn_model_gpu_multifilter.hdf5"
# set parameters:
print('Hyperparameters:')
alphabet_size = len(alphabet) + 2  # add 2: one for padding and one for unknown chars
print('Alphabet size:', alphabet_size)
maxlen = 400
print('Max text len:', maxlen)
batch_size = 16
print('Batch size:', batch_size)
embedding_dims = 50
print('Embedding dim:', embedding_dims)
nb_filters = [50,50,100,100,100,100,100]
print('Number of filters:', nb_filters)
filter_lengths = [1,2,3,4,5,6,7]
print('Filter lengths:', filter_lengths)
hidden_dims = 250
print('Hidden dims:', hidden_dims)
nb_epoch = 30
embedding_dropout = 0.2
print('Embedding dropout:', embedding_dropout)
fc_dropout = 0.5
print('Fully-connected dropout:', fc_dropout)
print('Loading data...')
(X_train, y_train), (X_test, y_test), num_classes = load_data(train_file, test_file, alphabet)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
y_train = np.array(y_train)
y_test = np.array(y_test)
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, num_classes)
Y_test = np_utils.to_categorical(y_test, num_classes)
print('Build model...')
main_input = Input(shape=(maxlen,))
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
embedding_layer = Embedding(alphabet_size,
embedding_dims,
input_length=maxlen,
                            dropout=embedding_dropout)
embedded = embedding_layer(main_input)
# we add a Convolution1D for each filter length, which will learn nb_filters[i]
# word group filters of size filter_lengths[i]:
convs = []
for i in xrange(len(nb_filters)):
conv_layer = Convolution1D(nb_filter=nb_filters[i],
filter_length=filter_lengths[i],
border_mode='valid',
activation='relu',
subsample_length=1)
conv_out = conv_layer(embedded)
# we use max pooling:
conv_out = MaxPooling1D(pool_length=conv_layer.output_shape[1])(conv_out)
# We flatten the output of the conv layer,
    # so that we can concat all conv outputs and add a vanilla dense layer:
conv_out = Flatten()(conv_out)
convs.append(conv_out)
# concat all conv outputs
x = merge(convs, mode='concat') if len(convs) > 1 else convs[0]
#concat = BatchNormalization()(concat)
# We add a vanilla hidden layer:
x = Dense(hidden_dims)(x)
x = Dropout(fc_dropout)(x)
x = Activation('relu')(x)
# We project onto number of classes output layer, and squash it with a softmax:
main_output = Dense(num_classes, activation='softmax')(x)
# finally, define the model
model = Model(input=main_input, output=main_output)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Train...')
# define callbacks
stopping = EarlyStopping(monitor='val_loss', patience=10)
checkpointer = ModelCheckpoint(filepath=model_file, verbose=1, save_best_only=True)
tensorboard = TensorBoard(log_dir="./logs-multifilter", write_graph=False)
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test),
callbacks=[stopping, checkpointer, tensorboard])
probabilities = model.predict(X_test, batch_size=batch_size)
predictions = probabilities.argmax(axis=-1)
idx2label = load_labels(labels_file)
#with open('cnn_predictions.txt', 'w') as g:
# for i in xrange(len(y_test)):
# g.write(' '.join([str(v) for v in X_test[i]]) + '\t' + idx2label.get(y_test[i], 'ERROR') + '\t' + idx2label.get(predictions[i], 'ERROR') + '\n')
print('Performance of final model (not necessarily best model):')
print('========================================================')
cm = confusion_matrix(y_test, predictions)
print('Confusion matrix:')
print(cm)
acc = accuracy_score(y_test, predictions)
print('Accuracy score:')
print(acc)
labels = [label for (idx, label) in sorted(idx2label.items())]
score_report = classification_report(y_test, predictions, target_names=labels)
print('Score report:')
print(score_report)
best_model = load_model(model_file)
probabilities = best_model.predict(X_test, batch_size=batch_size)
predictions = probabilities.argmax(axis=-1)
print('Performance of best model:')
print('==========================')
cm = confusion_matrix(y_test, predictions)
print('Confusion matrix:')
print(cm)
acc = accuracy_score(y_test, predictions)
print('Accuracy score:')
print(acc)
labels = [label for (idx, label) in sorted(idx2label.items())]
score_report = classification_report(y_test, predictions, target_names=labels)
print('Score report:')
print(score_report)
|
mit
|
vanpact/scipy
|
doc/source/tutorial/examples/normdiscr_plot2.py
|
84
|
1642
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2,1) #integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound #bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f,l = np.histogram(rvs,bins=gridlimits)
sfreq = np.vstack([gridint,f,probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
fs = sfreq[:,1].cumsum() / float(n_sample)
ft = sfreq[:,2].cumsum() / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5,scale=nd_std),
color='b')
plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
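# Optional follow-up (not part of the original plotting script): the observed
# frequencies can be compared against the expected counts with a chi-square
# goodness-of-fit test, using only quantities computed above:
#
#     ch2, pval = stats.chisquare(f, probs * n_sample)
#     print('chisquare for normdiscrete: chi2 = %6.3f pvalue = %6.4f' % (ch2, pval))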
|
bsd-3-clause
|
treycausey/scikit-learn
|
sklearn/cluster/bicluster/spectral.py
|
4
|
19543
|
"""Implements spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.externals import six
from sklearn.utils.arpack import svds
from sklearn.utils.arpack import eigsh
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.extmath import make_nonnegative
from sklearn.utils.extmath import norm
from sklearn.utils.validation import assert_all_finite
from sklearn.utils.validation import check_arrays
from .utils import check_array_ndim
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
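# Worked illustration (comment only, not part of the original module): for a
# dense X = [[1, 2], [3, 4]] the row sums are [3, 7] and the column sums are
# [4, 6], so the returned matrix is X[i, j] / sqrt(row_sum[i] * col_sum[j]),
# i.e. diag(1 / sqrt([3, 7])) * X * diag(1 / sqrt([4, 6])); row_diag and
# col_diag hold those 1 / sqrt scaling factors.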
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X, = check_arrays(X, sparse_format='csr', dtype=np.float64)
check_array_ndim(X)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
`rows_` : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
`columns_` : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
`row_labels_` : array-like, shape (n_rows,)
The bicluster label of each row.
`column_labels_` : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
`rows_` : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
`columns_` : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
`row_labels_` : array-like, shape (n_rows,)
Row partition labels.
`column_labels_` : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
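# A minimal usage sketch (not part of the original module), assuming the
# scikit-learn version this file ships with, where the classes are exposed
# under sklearn.cluster.bicluster:
#
#     import numpy as np
#     from sklearn.datasets import make_biclusters
#     from sklearn.cluster.bicluster import SpectralCoclustering
#
#     data, rows, columns = make_biclusters(shape=(30, 30), n_clusters=3,
#                                           random_state=0)
#     model = SpectralCoclustering(n_clusters=3, random_state=0)
#     model.fit(data)
#     print(model.row_labels_)     # one bicluster label per row
#     print(model.column_labels_)  # one bicluster label per column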
|
bsd-3-clause
|
CVML/scikit-learn
|
doc/datasets/mldata_fixture.py
|
367
|
1183
|
"""Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
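# Illustrative note (not part of the original fixture): once setup_module() has
# installed the mock, documentation doctests can fetch these datasets without
# any network access, for example:
#
#     from sklearn.datasets import fetch_mldata
#     mnist = fetch_mldata('MNIST original', data_home=custom_data_home)
#
# which resolves to the mocked 'mnist-original' entry registered above.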
|
bsd-3-clause
|
ElDeveloper/scikit-learn
|
examples/decomposition/plot_ica_blind_source_separation.py
|
349
|
2228
|
"""
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
|
bsd-3-clause
|
M4573R/data
|
pew-religions/Religion-Leah.py
|
37
|
3271
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
religions = ['Buddhist', 'Catholic', 'Evangel Prot', 'Hindu', 'Hist Black Prot', 'Jehovahs Witness', 'Jewish', 'Mainline Prot', 'Mormon', 'Muslim', 'Orthodox Christian', 'Unaffiliated']
csv = open("current.csv", 'w')
csv.truncate()
def write_row(matrix):
arr = np.asarray(matrix[0])[0]
row = ','.join([str(a) for a in arr]) + '\n'
csv.write(row)
# Initial distribution of religions in the US
first = np.matrix([.007, .208, .254, .007, .065, .008, .019, .147, .016, .009, .005, .228])
# Normed to sum to 100%
current = first / np.sum(first)
t0 = current
write_row(current)
# Transition matrix
trans = np.matrix(((0.390296314, 0.027141947, 0.06791021, 0.001857564, 0, 0, 0.011166082, 0.059762879, 0, 0, 0, 0.396569533),
(0.005370791, 0.593173325, 0.103151608, 0.000649759, 0.010486747, 0.005563864, 0.002041424, 0.053825329, 0.004760476, 0.001130529, 0.000884429, 0.199488989),
(0.00371836, 0.023900817, 0.650773331, 0.000250102, 0.016774503, 0.003098214, 0.001865491, 0.122807467, 0.004203107, 0.000186572, 0.002123778, 0.151866648),
(0, 0, 0.0033732, 0.804072618, 0, 0.001511151, 0, 0.01234639, 0, 0.00209748, 0, 0.17659916),
(0.002051357, 0.016851659, 0.09549708, 0, 0.699214315, 0.010620473, 0.000338804, 0.024372871, 0.000637016, 0.009406884, 0.000116843, 0.129892558),
(0, 0.023278276, 0.109573979, 0, 0.077957568, 0.336280578, 0, 0.074844833, 0.007624035, 0, 0, 0.35110361),
(0.006783201, 0.004082693, 0.014329604, 0, 0, 0.000610585, 0.745731278, 0.009587587, 0, 0, 0.002512334, 0.184058682),
(0.005770357, 0.038017215, 0.187857555, 0.000467601, 0.008144075, 0.004763516, 0.003601208, 0.451798506, 0.005753587, 0.000965543, 0.00109818, 0.25750798),
(0.007263135, 0.01684885, 0.06319935, 0.000248467, 0.0059394, 0, 0.001649896, 0.03464334, 0.642777489, 0.002606278, 0, 0.208904711),
(0, 0.005890381, 0.023573308, 0, 0.011510643, 0, 0.005518343, 0.014032084, 0, 0.772783807, 0, 0.15424369),
(0.004580353, 0.042045841, 0.089264134 , 0, 0.00527346, 0, 0, 0.061471387, 0.005979218, 0.009113978, 0.526728084, 0.243246723),
(0.006438308, 0.044866331, 0.1928814, 0.002035375, 0.04295005, 0.010833621, 0.011541439, 0.09457963, 0.01365141, 0.005884336, 0.002892072, 0.525359211)))
# Fertility array
fert = np.matrix(((2.1, 2.3, 2.3, 2.1, 2.5, 2.1, 2, 1.9, 3.4, 2.8, 2.1, 1.7)))
# Create data frame for printing later
religionDataFrame = pd.DataFrame()
for x in range(0,100):
### beginning of conversion step
# apply transition matrix to current distribution
current = current * trans
### beginning of fertility step
# divide by two for couple number
current = current/2
# adjust by fertility
current = np.multiply(fert, current)
# normalize to 100%
current = current / np.sum(current)
write_row(current)
# add to data frame
religionDataFrame = religionDataFrame.append(pd.DataFrame(current), ignore_index=True)
csv.close()
religionDataFrame.columns = religions
religionDataFrame.to_csv("current_pandas_save.csv")
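# Optional follow-up sketch (not part of the original script): the saved
# trajectory can be read back with pandas for further inspection, e.g.
#
#     traj = pd.read_csv("current.csv", header=None, names=religions)
#     print(traj.tail())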
|
mit
|
great-expectations/great_expectations
|
tests/integration/docusaurus/expectations/advanced/multi_batch_rule_based_profiler_example.py
|
1
|
5031
|
from ruamel import yaml
from great_expectations import DataContext
from great_expectations.rule_based_profiler.profiler import Profiler
profiler_config = """
# This profiler is meant to be used on the NYC taxi data (yellow_trip_data_sample_<YEAR>-<MONTH>.csv)
# located in tests/test_sets/taxi_yellow_trip_data_samples/
variables:
false_positive_rate: 0.01
mostly: 1.0
rules:
row_count_rule:
domain_builder:
class_name: TableDomainBuilder
parameter_builders:
- parameter_name: row_count_range
class_name: NumericMetricRangeMultiBatchParameterBuilder
batch_request:
datasource_name: taxi_pandas
data_connector_name: monthly
data_asset_name: my_reports
data_connector_query:
index: "-6:-1"
metric_name: table.row_count
metric_domain_kwargs: $domain.domain_kwargs
false_positive_rate: $variables.false_positive_rate
round_decimals: 0
truncate_values:
lower_bound: 0
expectation_configuration_builders:
- expectation_type: expect_table_row_count_to_be_between
class_name: DefaultExpectationConfigurationBuilder
module_name: great_expectations.rule_based_profiler.expectation_configuration_builder
min_value: $parameter.row_count_range.value.min_value
max_value: $parameter.row_count_range.value.max_value
mostly: $variables.mostly
meta:
profiler_details: $parameter.row_count_range.details
column_ranges_rule:
domain_builder:
class_name: SimpleSemanticTypeColumnDomainBuilder
semantic_types:
- numeric
# BatchRequest yielding exactly one batch (March, 2019 trip data)
batch_request:
datasource_name: taxi_pandas
data_connector_name: monthly
data_asset_name: my_reports
data_connector_query:
index: -1
parameter_builders:
- parameter_name: min_range
class_name: NumericMetricRangeMultiBatchParameterBuilder
batch_request:
datasource_name: taxi_pandas
data_connector_name: monthly
data_asset_name: my_reports
data_connector_query:
index: "-6:-1"
metric_name: column.min
metric_domain_kwargs: $domain.domain_kwargs
false_positive_rate: $variables.false_positive_rate
round_decimals: 2
- parameter_name: max_range
class_name: NumericMetricRangeMultiBatchParameterBuilder
batch_request:
datasource_name: taxi_pandas
data_connector_name: monthly
data_asset_name: my_reports
data_connector_query:
index: "-6:-1"
metric_name: column.max
metric_domain_kwargs: $domain.domain_kwargs
false_positive_rate: $variables.false_positive_rate
round_decimals: 2
expectation_configuration_builders:
- expectation_type: expect_column_min_to_be_between
class_name: DefaultExpectationConfigurationBuilder
module_name: great_expectations.rule_based_profiler.expectation_configuration_builder
column: $domain.domain_kwargs.column
min_value: $parameter.min_range.value.min_value
max_value: $parameter.min_range.value.max_value
mostly: $variables.mostly
meta:
profiler_details: $parameter.min_range.details
- expectation_type: expect_column_max_to_be_between
class_name: DefaultExpectationConfigurationBuilder
module_name: great_expectations.rule_based_profiler.expectation_configuration_builder
column: $domain.domain_kwargs.column
min_value: $parameter.max_range.value.min_value
max_value: $parameter.max_range.value.max_value
mostly: $variables.mostly
meta:
profiler_details: $parameter.max_range.details
"""
data_context = DataContext()
# Instantiate Profiler
full_profiler_config_dict: dict = yaml.load(profiler_config)
profiler: Profiler = Profiler(
profiler_config=full_profiler_config_dict,
data_context=data_context,
)
suite = profiler.profile(expectation_suite_name="test_suite_name")
print(suite)
# Please note that this docstring is here to demonstrate output for docs. It is not needed for normal use.
first_rule_suite = """
{
"meta": {"great_expectations_version": "0.13.19+58.gf8a650720.dirty"},
"data_asset_type": None,
"expectations": [
{
"kwargs": {"min_value": 10000, "max_value": 10000, "mostly": 1.0},
"expectation_type": "expect_table_row_count_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "table.row_count",
"metric_domain_kwargs": {},
}
}
},
}
],
"expectation_suite_name": "tmp_suite_Profiler_e66f7cbb",
}
"""
|
apache-2.0
|
Paul-St-Young/QMC
|
get_data.py
|
1
|
3798
|
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd
import nexus_addon as na
import subprocess as sp
import xml.etree.ElementTree as ET
def twist_options(twist_input,orb_options = ['meshfactor','precision','twistnum']):
tree = ET.parse( twist_input )
dft_orb_builder_node = tree.findall(
'.//sposet_builder[@type="bspline"]')[0]
myoptns = {}
for optn in orb_options:
myoptns[optn] = dft_orb_builder_node.attrib[optn]
# end for
return myoptns
# end def
if __name__ == '__main__':
# initialize analyzer
from qmca import QBase
options = {"equilibration":"auto"}
QBase.options.transfer_from(options)
paths = sp.check_output(['find','../default','-name','dmc_4']).split('\n')[:-1]
for path in paths:
        # the purpose of this loop is to generate the raw database twists.json, one for each folder
if os.path.isfile( os.path.join(path,'twists.json') ):
continue
# end if
# locate all inputs in this folder
cmd = 'ls %s | grep in.xml | grep twistnum'%path
proc = sp.Popen(cmd,shell=True,stdout=sp.PIPE,stderr=sp.PIPE)
out,err = proc.communicate()
inputs = out.split('\n')[:-1]
# make a database of all the scalar files
data = []
for qmc_input in inputs:
infile = os.path.join(path,qmc_input)
mydf = pd.DataFrame(na.scalars_from_input(infile))
toptn= twist_options(infile)
for optn in toptn.keys():
mydf[optn] = toptn[optn]
# end for
data.append( mydf )
# end for
df = pd.concat(data).reset_index().drop('index',axis=1)
# save raw data in local directory
pd.concat([df,df['settings'].apply(pd.Series)],axis=1).to_json(
os.path.join(path,'twists.json'))
# end for path
for path in paths:
# the purpose of this loop is to generate analyzed database 'scalars.json', one for each folder
if os.path.exists( os.path.join(path,'scalars.json') ):
continue
# end if
# load local data
df = pd.read_json(os.path.join(path,'twists.json'))
# !!!! only analyze real twists
#real_twists = [ 0, 8, 10, 32, 34, 40, 42, 2]
#df = df[ df['twistnum'].apply(lambda x:x in real_twists) ]
# exclude columns that don't need to be averaged, add more as needed
special_colmns = ['iqmc','method','path','settings','vol_unit','volume']
columns_to_average = df.drop(special_colmns,axis=1).columns
mean_names = []
error_names= []
for col_name in columns_to_average:
if col_name.endswith('_mean'):
mean_names.append(col_name)
elif col_name.endswith('_error'):
error_names.append(col_name)
# end if
# end for col_name
col_names = []
for iname in range(len(mean_names)):
mname = mean_names[iname].replace('_mean','')
ename = error_names[iname].replace('_error','')
assert mname == ename
col_names.append(mname)
# end for i
# perform twist averaging
new_means = df.groupby('iqmc')[mean_names].apply(np.mean)
ntwists = len(df[df['iqmc']==0]) # better way to determine ntwists?
new_errors = df.groupby('iqmc')[error_names].apply(
lambda x:np.sqrt((x**2.).sum())/ntwists)
# make averaged database
dfev = pd.merge(new_means.reset_index(),new_errors.reset_index())
extras = df[special_colmns].groupby('iqmc').apply(lambda x:x.iloc[0])
newdf = pd.merge( extras.drop('iqmc',axis=1).reset_index(), dfev)
newdf.to_json(os.path.join(path,'scalars.json'))
# end for
# congregate data
import dmc_databse_analyzer as dda
data = []
for path in paths:
jfile = path + '/scalars.json'
data.append( dda.process_dmc_data_frame( pd.read_json(jfile) ) )
# end for path
df = pd.concat(data).reset_index().drop('index',axis=1)
df.to_json('scalars.json')
# end __main__
|
mit
|
jjx02230808/project0223
|
sklearn/metrics/pairwise.py
|
9
|
45248
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
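# Illustrative note (comment only, not in the original source): when Y is None
# the same validated array is returned for both outputs, e.g.
#
#     import numpy as np
#     A = np.ones((3, 2))
#     X_checked, Y_checked = check_pairwise_arrays(A, None)
#     X_checked is Y_checked   # True: Y is just a pointer to X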
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
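# A minimal worked example (comment only, not part of the original source):
#
#     import numpy as np
#     X = np.array([[0., 0.], [1., 1.]])
#     Y = np.array([[1., 0.], [2., 2.]])
#     idx, dist = pairwise_distances_argmin_min(X, Y)
#     # idx  -> array([0, 0]): the first row of Y is closest to both rows of X
#     # dist -> array([ 1.,  1.])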
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
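# Illustrative sketch, not part of the original module: the docstring above
# states that pairwise_distances_argmin is mostly equivalent to calling
# pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis), only with a
# much smaller memory footprint.  The helper below checks that claim on a
# small random example; the function name and the toy data are arbitrary.
def _demo_pairwise_distances_argmin_equivalence():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    Y = rng.rand(30, 3)
    chunked = pairwise_distances_argmin(X, Y, metric="euclidean")
    naive = pairwise_distances(X, Y, metric="euclidean").argmin(axis=1)
    assert np.array_equal(chunked, naive)
    return chunked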
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
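# Illustrative sketch, not part of the original module: with
# sum_over_features=False, manhattan_distances returns the componentwise
# absolute differences with shape (n_samples_X * n_samples_Y, n_features);
# summing those rows recovers the usual L1 distance matrix.  The helper
# name and the toy inputs are arbitrary.
def _demo_manhattan_componentwise():
    import numpy as np
    X = np.array([[1., 2.], [3., 4.]])
    Y = np.array([[1., 2.], [0., 3.]])
    D = manhattan_distances(X, Y)                         # shape (2, 2)
    C = manhattan_distances(X, Y, sum_over_features=False)
    assert np.allclose(C.sum(axis=1).reshape(2, 2), D)
    return D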
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
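# Illustrative sketch, not part of the original module: cosine_distances is
# documented as 1.0 minus the cosine similarity, and the in-place arithmetic
# above implements exactly that.  The helper below verifies the identity on
# a small random matrix; name and data are arbitrary.
def _demo_cosine_distance_identity():
    import numpy as np
    rng = np.random.RandomState(42)
    X = rng.rand(5, 4)
    assert np.allclose(cosine_distances(X), 1.0 - cosine_similarity(X))
    return cosine_distances(X)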
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
    Notes
    -----
    The cosine distance is equivalent to half the squared euclidean
    distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
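# Illustrative sketch, not part of the original module: the Notes section
# above states that the paired cosine distance equals half the squared
# euclidean distance between the L2-normalized rows.  The helper below
# spells that identity out numerically; name and data are arbitrary.
def _demo_paired_cosine_identity():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(6, 3)
    Y = rng.rand(6, 3)
    expected = .5 * ((normalize(X) - normalize(Y)) ** 2).sum(axis=1)
    assert np.allclose(paired_cosine_distances(X, Y), expected)
    return expected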
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
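# Illustrative sketch, not part of the original module: one entry of the
# polynomial Gram matrix should reduce to the scalar formula
# (gamma * <x, y> + coef0) ** degree, with gamma defaulting to
# 1 / n_features.  The helper below checks that; name and data are
# arbitrary.
def _demo_polynomial_kernel_formula():
    import numpy as np
    rng = np.random.RandomState(1)
    X = rng.rand(4, 3)
    Y = rng.rand(5, 3)
    K = polynomial_kernel(X, Y, degree=3, coef0=1)
    gamma = 1.0 / X.shape[1]
    expected = (gamma * np.dot(X[0], Y[2]) + 1) ** 3
    assert np.allclose(K[0, 2], expected)
    return K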
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
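# Illustrative sketch, not part of the original module: each entry of the
# RBF Gram matrix should equal exp(-gamma * ||x - y||^2).  The helper below
# compares one entry against that scalar expression; name and data are
# arbitrary.
def _demo_rbf_kernel_formula():
    import numpy as np
    rng = np.random.RandomState(2)
    X = rng.rand(3, 4)
    Y = rng.rand(2, 4)
    gamma = 0.5
    K = rbf_kernel(X, Y, gamma=gamma)
    expected = np.exp(-gamma * ((X[1] - Y[0]) ** 2).sum())
    assert np.allclose(K[1, 0], expected)
    return K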
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter *dense_output* for sparse output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
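# Illustrative sketch, not part of the original module: the docstring above
# notes that on L2-normalized data cosine_similarity coincides with
# linear_kernel.  The helper below verifies that equivalence on random
# data; name and data are arbitrary.
def _demo_cosine_similarity_vs_linear_kernel():
    import numpy as np
    rng = np.random.RandomState(3)
    X = rng.rand(5, 4)
    X_normalized = normalize(X)
    assert np.allclose(cosine_similarity(X),
                       linear_kernel(X_normalized, X_normalized))
    return cosine_similarity(X)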
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
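# Illustrative sketch, not part of the original module: chi2_kernel is the
# exponentiated additive chi-squared kernel, i.e. exp(gamma * k_add(x, y))
# with k_add <= 0.  The helper below checks that relation on non-negative,
# histogram-like data; name and data are arbitrary.
def _demo_chi2_vs_additive_chi2():
    import numpy as np
    rng = np.random.RandomState(4)
    X = rng.rand(4, 6)   # non-negative, as required by both kernels
    Y = rng.rand(3, 6)
    gamma = 0.8
    expected = np.exp(gamma * additive_chi2_kernel(X, Y))
    assert np.allclose(chi2_kernel(X, Y, gamma=gamma), expected)
    return expected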
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distance matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
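# Illustrative sketch, not part of the original module: a minimal usage
# example covering the three branches handled above (a built-in metric
# name, metric="precomputed" and a callable).  The toy data and the lambda
# metric are arbitrary choices.
def _demo_pairwise_distances_usage():
    import numpy as np
    X = np.array([[0., 1.], [1., 1.], [3., 2.]])
    D = pairwise_distances(X, metric="euclidean")
    # "precomputed" only validates the matrix and returns it unchanged.
    assert np.allclose(pairwise_distances(D, metric="precomputed"), D)
    # A callable receives two 1D rows and must return a scalar distance.
    D_l1 = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
    assert np.allclose(D_l1, pairwise_distances(X, metric="manhattan"))
    return D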
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
        ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
         'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
|
bsd-3-clause
|
ycaihua/scikit-learn
|
sklearn/metrics/cluster/supervised.py
|
17
|
26843
|
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari: float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
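# Illustrative sketch, not part of the original module: a small worked
# example of the contingency-based ARI computation above, checking the
# documented symmetry property and the 0.57... value quoted in the
# docstring.  The helper name is arbitrary.
def _demo_adjusted_rand_score():
    labels_a = [0, 0, 1, 2]
    labels_b = [0, 0, 1, 1]
    ari = adjusted_rand_score(labels_a, labels_b)
    # ARI is symmetric in its two arguments.
    assert abs(ari - adjusted_rand_score(labels_b, labels_a)) < 1e-12
    # Matches the 0.57... value shown in the docstring examples (= 4 / 7).
    assert abs(ari - 4.0 / 7.0) < 1e-10
    return ari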
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
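# Illustrative sketch, not part of the original module: a direct
# re-computation of the docstring formula
# MI = sum_ij P(i, j) * log(P(i, j) / (P(i) * P'(j))) from the contingency
# matrix, compared against mutual_info_score (both in nats).  The helper
# name and the toy labelings are arbitrary.
def _demo_mutual_info_formula():
    import numpy as np
    labels_a = [0, 0, 1, 1, 2, 2]
    labels_b = [1, 1, 0, 0, 0, 2]
    c = contingency_matrix(labels_a, labels_b).astype(float)
    p_ij = c / c.sum()
    p_i = p_ij.sum(axis=1)[:, np.newaxis]
    p_j = p_ij.sum(axis=0)[np.newaxis, :]
    nonzero = p_ij > 0
    mi_direct = (p_ij[nonzero]
                 * np.log(p_ij[nonzero] / (p_i * p_j)[nonzero])).sum()
    assert np.allclose(mutual_info_score(labels_a, labels_b), mi_direct)
    return mi_direct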
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami : float (upper-bounded by 1.0)
       The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings)
       have an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
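# Illustrative sketch, not part of the original module: the normalization
# used above is MI / sqrt(H(labels_true) * H(labels_pred)).  The helper
# below recomputes that quotient from the public building blocks and
# compares it with normalized_mutual_info_score; name and data are
# arbitrary.
def _demo_nmi_normalization():
    import numpy as np
    labels_a = [0, 0, 1, 1, 2, 2]
    labels_b = [1, 1, 0, 0, 0, 2]
    mi = mutual_info_score(labels_a, labels_b)
    expected = mi / np.sqrt(entropy(labels_a) * entropy(labels_b))
    assert np.allclose(normalized_mutual_info_score(labels_a, labels_b),
                       expected)
    return expected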
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
|
bsd-3-clause
|
mihail911/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/pyplot.py
|
69
|
77521
|
import sys
import matplotlib
from matplotlib import _pylab_helpers, interactive
from matplotlib.cbook import dedent, silent_list, is_string_like, is_numlike
from matplotlib.figure import Figure, figaspect
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.image import imread as _imread
from matplotlib import rcParams, rcParamsDefault, get_backend
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for csv2rec in plotfile
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap
# We may not need the following imports here:
from matplotlib.colors import Normalize, normalize # latter for backwards compat.
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
## Backend detection ##
def _backend_selection():
""" If rcParams['backend_fallback'] is true, check to see if the
current backend is compatible with the current running event
    loop, and if not, switches to a compatible one.
"""
backend = rcParams['backend']
if not rcParams['backend_fallback'] or \
backend not in _interactive_bk:
return
is_agg_backend = rcParams['backend'].endswith('Agg')
if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
import wx
if wx.App.IsMainLoopRunning():
rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
elif 'qt' in sys.modules and not backend == 'QtAgg':
import qt
if not qt.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qtAgg'
elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
import PyQt4.QtGui
if not PyQt4.QtGui.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qt4Agg'
elif 'gtk' in sys.modules and not backend in ('GTK', 'GTKAgg',
'GTKCairo'):
import gobject
if gobject.MainLoop().is_running():
rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
#import Tkinter
pass #what if anything do we need to do for tkinter?
_backend_selection()
## Global ##
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
def findobj(o=None, match=None):
if o is None:
o = gcf()
return o.findobj(match)
findobj.__doc__ = Artist.findobj.__doc__
def switch_backend(newbackend):
"""
Switch the default backend to newbackend. This feature is
**experimental**, and is only expected to work switching to an
image backend. Eg, if you have a bunch of PostScript scripts that
you want to run from an interactive ipython session, you may want
to switch to the PS backend before running them to avoid having a
bunch of GUI windows popup. If you try to interactively switch
from one GUI backend to another, you will explode.
Calling this command will close all open windows.
"""
close('all')
global new_figure_manager, draw_if_interactive, show
matplotlib.use(newbackend, warn=False)
reload(matplotlib.backends)
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
def isinteractive():
"""
Return the interactive status
"""
return matplotlib.is_interactive()
def ioff():
'Turn interactive mode off.'
matplotlib.interactive(False)
def ion():
'Turn interactive mode on.'
matplotlib.interactive(True)
def rc(*args, **kwargs):
matplotlib.rc(*args, **kwargs)
if matplotlib.rc.__doc__ is not None:
rc.__doc__ = dedent(matplotlib.rc.__doc__)
def rcdefaults():
matplotlib.rcdefaults()
draw_if_interactive()
if matplotlib.rcdefaults.__doc__ is not None:
rcdefaults.__doc__ = dedent(matplotlib.rcdefaults.__doc__)
# The current "image" (ScalarMappable) is tracked here on a
# per-pylab-session basis:
def gci():
"""
Get the current :class:`~matplotlib.cm.ScalarMappable` instance
(image or patch collection), or *None* if no images or patch
collections have been defined. The commands
:func:`~matplotlib.pyplot.imshow` and
:func:`~matplotlib.pyplot.figimage` create
:class:`~matplotlib.image.Image` instances, and the commands
:func:`~matplotlib.pyplot.pcolor` and
:func:`~matplotlib.pyplot.scatter` create
:class:`~matplotlib.collections.Collection` instances.
"""
return gci._current
gci._current = None
def sci(im):
"""
Set the current image (target of colormap commands like
:func:`~matplotlib.pyplot.jet`, :func:`~matplotlib.pyplot.hot` or
:func:`~matplotlib.pyplot.clim`).
"""
gci._current = im
## Any Artist ##
# (getp is simply imported)
def setp(*args, **kwargs):
ret = _setp(*args, **kwargs)
draw_if_interactive()
return ret
if _setp.__doc__ is not None:
setp.__doc__ = _setp.__doc__
## Figures ##
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize = None, # defaults to rc figure.figsize
dpi = None, # defaults to rc figure.dpi
facecolor = None, # defaults to rc figure.facecolor
edgecolor = None, # defaults to rc figure.edgecolor
frameon = True,
FigureClass = Figure,
**kwargs
):
"""
call signature::
figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
Create a new figure and return a :class:`matplotlib.figure.Figure`
instance. If *num* = *None*, the figure number will be incremented and
a new figure will be created. The returned figure objects have a
*number* attribute holding this number.
If *num* is an integer, and ``figure(num)`` already exists, make it
active and return the handle to it. If ``figure(num)`` does not exist
it will be created. Numbering starts at 1, matlab style::
figure(1)
If you are creating many figures, make sure you explicitly call "close"
on the figures you are not using, because this will enable pylab
to properly clean up the memory.
Optional keyword arguments:
========= =======================================================
Keyword Description
========= =======================================================
figsize width x height in inches; defaults to rc figure.figsize
dpi resolution; defaults to rc figure.dpi
facecolor the background color; defaults to rc figure.facecolor
edgecolor the border color; defaults to rc figure.edgecolor
========= =======================================================
rcParams defines the default values, which can be modified in the
matplotlibrc file
*FigureClass* is a :class:`~matplotlib.figure.Figure` or derived
class that will be passed on to :meth:`new_figure_manager` in the
backends which allows you to hook custom Figure classes into the
pylab interface. Additional kwargs will be passed on to your
figure init function.
"""
if figsize is None : figsize = rcParams['figure.figsize']
if dpi is None : dpi = rcParams['figure.dpi']
if facecolor is None : facecolor = rcParams['figure.facecolor']
if edgecolor is None : edgecolor = rcParams['figure.edgecolor']
if num is None:
allnums = [f.num for f in _pylab_helpers.Gcf.get_all_fig_managers()]
if allnums:
num = max(allnums) + 1
else:
num = 1
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
if get_backend().lower() == 'ps': dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
figManager.canvas.figure.number = num
draw_if_interactive()
return figManager.canvas.figure
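# Illustrative usage sketch (not part of the original module, never called at
# import time): how figure() autonumbers, reuses an existing number, and is
# released with close().  All names used are defined elsewhere in this module.
def _example_figure_usage():
    fig1 = figure()                 # no num: the next free number is used
    fig2 = figure(figsize=(4, 3))   # a second, independently numbered figure
    again = figure(fig1.number)     # an existing num re-activates that figure
    assert again is fig1
    close('all')                    # release both figure managers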
def gcf():
"Return a handle to the current figure."
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
return figManager.canvas.figure
else:
return figure()
def get_current_fig_manager():
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
gcf() # creates an active figure as a side effect
figManager = _pylab_helpers.Gcf.get_active()
return figManager
# note we check for __doc__ is not None since py2exe optimize removes
# the docstrings
def connect(s, func):
return get_current_fig_manager().canvas.mpl_connect(s, func)
if FigureCanvasBase.mpl_connect.__doc__ is not None:
connect.__doc__ = dedent(FigureCanvasBase.mpl_connect.__doc__)
def disconnect(cid):
return get_current_fig_manager().canvas.mpl_disconnect(cid)
if FigureCanvasBase.mpl_disconnect.__doc__ is not None:
disconnect.__doc__ = dedent(FigureCanvasBase.mpl_disconnect.__doc__)
def close(*args):
"""
Close a figure window
``close()`` by itself closes the current figure
``close(num)`` closes figure number *num*
``close(h)`` where *h* is a :class:`Figure` instance, closes that figure
``close('all')`` closes all the figure windows
"""
if len(args)==0:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None: return
else:
figManager.canvas.mpl_disconnect(figManager._cidgcf)
_pylab_helpers.Gcf.destroy(figManager.num)
elif len(args)==1:
arg = args[0]
if arg=='all':
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
elif isinstance(arg, int):
_pylab_helpers.Gcf.destroy(arg)
elif isinstance(arg, Figure):
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
if manager.canvas.figure==arg:
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
else:
raise TypeError('Unrecognized argument type %s to close'%type(arg))
else:
raise TypeError('close takes 0 or 1 arguments')
def clf():
"""
Clear the current figure
"""
gcf().clf()
draw_if_interactive()
def draw():
'redraw the current figure'
get_current_fig_manager().canvas.draw()
def savefig(*args, **kwargs):
fig = gcf()
return fig.savefig(*args, **kwargs)
if Figure.savefig.__doc__ is not None:
savefig.__doc__ = dedent(Figure.savefig.__doc__)
def ginput(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is negative, does not timeout.
"""
return gcf().ginput(*args, **kwargs)
if Figure.ginput.__doc__ is not None:
ginput.__doc__ = dedent(Figure.ginput.__doc__)
def waitforbuttonpress(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* key or mouse clicks from the user and
return a list containing True's for keyboard clicks and False's
for mouse clicks.
If *timeout* is negative, does not timeout.
"""
return gcf().waitforbuttonpress(*args, **kwargs)
if Figure.waitforbuttonpress.__doc__ is not None:
waitforbuttonpress.__doc__ = dedent(Figure.waitforbuttonpress.__doc__)
# Putting things in figures
def figtext(*args, **kwargs):
ret = gcf().text(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.text.__doc__ is not None:
figtext.__doc__ = dedent(Figure.text.__doc__)
def suptitle(*args, **kwargs):
ret = gcf().suptitle(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.suptitle.__doc__ is not None:
suptitle.__doc__ = dedent(Figure.suptitle.__doc__)
def figimage(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
ret = gcf().figimage(*args, **kwargs)
draw_if_interactive()
gci._current = ret
return ret
if Figure.figimage.__doc__ is not None:
figimage.__doc__ = dedent(Figure.figimage.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
def figlegend(handles, labels, loc, **kwargs):
"""
Place a legend in the figure.
*labels*
a sequence of strings
*handles*
a sequence of :class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances
*loc*
can be a string or an integer specifying the legend
location
A :class:`matplotlib.legend.Legend` instance is returned.
Example::
figlegend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right' )
.. seealso::
:func:`~matplotlib.pyplot.legend`:
For information about the location codes
"""
l = gcf().legend(handles, labels, loc, **kwargs)
draw_if_interactive()
return l
## Figure and Axes hybrid ##
def hold(b=None):
"""
Set the hold state. If *b* is None (default), toggle the
hold state, else set the hold state to boolean value *b*::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
When *hold* is *True*, subsequent plot commands will be added to
the current axes. When *hold* is *False*, the current axes and
figure will be cleared on the next plot command.
"""
fig = gcf()
ax = fig.gca()
fig.hold(b)
ax.hold(b)
    # b=None toggles the hold state, so get the current hold state here;
    # but should pyplot's hold toggle the rc setting?  We think not.
b = ax.ishold()
rc('axes', hold=b)
def ishold():
"""
Return the hold status of the current axes
"""
return gca().ishold()
def over(func, *args, **kwargs):
"""
over calls::
func(*args, **kwargs)
with ``hold(True)`` and then restores the hold state.
"""
h = ishold()
hold(True)
func(*args, **kwargs)
hold(h)
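# Illustrative sketch (assumption, not original code): how hold(), ishold()
# and over() interact.  over() forces hold=True for a single call and then
# restores the previous state.
def _example_hold_and_over():
    plot([0, 1, 2])            # first curve
    hold(True)                 # keep the axes for the next plot command
    plot([2, 1, 0])            # drawn on top of the first curve
    over(plot, [1, 1, 1])      # same effect, without touching the hold flag
    print(ishold())            # still True: over() restored the prior state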
## Axes ##
def axes(*args, **kwargs):
"""
Add an axes at position rect specified by:
- ``axes()`` by itself creates a default full ``subplot(111)`` window axis.
- ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
height] in normalized (0, 1) units. *axisbg* is the background
color for the axis, default white.
- ``axes(h)`` where *h* is an axes instance makes *h* the current
axis. An :class:`~matplotlib.axes.Axes` instance is returned.
======= ============ ================================================
    kwarg   Accepts      Description
======= ============ ================================================
axisbg color the axes background color
frameon [True|False] display the frame?
sharex otherax current axes shares xaxis attribute with otherax
sharey otherax current axes shares yaxis attribute with otherax
polar [True|False] use a polar axes?
======= ============ ================================================
Examples:
* :file:`examples/pylab_examples/axes_demo.py` places custom axes.
* :file:`examples/pylab_examples/shared_axis_demo.py` uses
*sharex* and *sharey*.
"""
nargs = len(args)
if len(args)==0: return subplot(111, **kwargs)
if nargs>1:
raise TypeError('Only one non keyword arg to axes allowed')
arg = args[0]
if isinstance(arg, Axes):
a = gcf().sca(arg)
else:
rect = arg
a = gcf().add_axes(rect, **kwargs)
draw_if_interactive()
return a
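# Illustrative sketch (assumption): the three calling forms accepted by
# axes(), as listed in its docstring.
def _example_axes_forms():
    a_default = axes()                        # same as subplot(111)
    a_inset = axes([0.65, 0.65, 0.2, 0.2])    # rect = [left, bottom, width, height]
    axes(a_default)                           # pass an Axes to make it current again
    assert gca() is a_default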
def delaxes(*args):
"""
``delaxes(ax)``: remove *ax* from the current figure. If *ax*
doesn't exist, an error will be raised.
``delaxes()``: delete the current axes
"""
if not len(args):
ax = gca()
else:
ax = args[0]
ret = gcf().delaxes(ax)
draw_if_interactive()
return ret
def gca(**kwargs):
"""
Return the current axis instance. This can be used to control
axis properties either using set or the
:class:`~matplotlib.axes.Axes` methods, for example, setting the
xaxis range::
plot(t,s)
set(gca(), 'xlim', [0,10])
or::
plot(t,s)
a = gca()
a.set_xlim([0,10])
"""
ax = gcf().gca(**kwargs)
return ax
# More ways of creating axes:
def subplot(*args, **kwargs):
"""
Create a subplot command, creating axes with::
subplot(numRows, numCols, plotNum)
where *plotNum* = 1 is the first plot number and increasing *plotNums*
fill rows first. max(*plotNum*) == *numRows* * *numCols*
You can leave out the commas if *numRows* <= *numCols* <=
*plotNum* < 10, as in::
subplot(211) # 2 rows, 1 column, first (upper) plot
``subplot(111)`` is the default axis.
New subplots that overlap old will delete the old axes. If you do
not want this behavior, use
:meth:`matplotlib.figure.Figure.add_subplot` or the
:func:`~matplotlib.pyplot.axes` command. Eg.::
from pylab import *
plot([1,2,3]) # implicitly creates subplot(111)
subplot(211) # overlaps, subplot(111) is killed
plot(rand(12), rand(12))
subplot(212, axisbg='y') # creates 2nd subplot with yellow background
Keyword arguments:
*axisbg*:
The background color of the subplot, which can be any valid
color specifier. See :mod:`matplotlib.colors` for more
information.
*polar*:
A boolean flag indicating whether the subplot plot should be
a polar projection. Defaults to False.
*projection*:
A string giving the name of a custom projection to be used
for the subplot. This projection must have been previously
registered. See :func:`matplotlib.projections.register_projection`
.. seealso::
:func:`~matplotlib.pyplot.axes`:
For additional information on :func:`axes` and
:func:`subplot` keyword arguments.
:file:`examples/pylab_examples/polar_scatter.py`
**Example:**
.. plot:: mpl_examples/pylab_examples/subplot_demo.py
"""
fig = gcf()
a = fig.add_subplot(*args, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
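# Illustrative sketch (assumption): subplot() numbering and the
# overlap-deletion rule described in the docstring above.
def _example_subplot_grid():
    plot([1, 2, 3])        # implicitly creates subplot(111)
    subplot(211)           # overlaps the implicit axes, which is deleted
    plot([3, 2, 1])
    subplot(2, 1, 2)       # explicit (numRows, numCols, plotNum) form
    plot([1, 3, 2])        # gcf().axes now holds just the 2x1 pair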
def twinx(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the xaxis. The ticks for *ax2* will be placed on
the right, and the *ax2* instance is returned.
.. seealso::
:file:`examples/api_examples/two_scales.py`
"""
if ax is None:
ax=gca()
ax1 = ax.twinx()
draw_if_interactive()
return ax1
def twiny(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the yaxis. The ticks for *ax2* will be placed on
the top, and the *ax2* instance is returned.
"""
if ax is None:
ax=gca()
ax1 = ax.twiny()
draw_if_interactive()
return ax1
def subplots_adjust(*args, **kwargs):
"""
call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
Tune the subplot layout via the
:class:`matplotlib.figure.SubplotParams` mechanism. The parameter
meanings (and suggested defaults) are::
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.2 # the amount of height reserved for white space between subplots
The actual defaults are controlled by the rc file
"""
fig = gcf()
fig.subplots_adjust(*args, **kwargs)
draw_if_interactive()
def subplot_tool(targetfig=None):
"""
Launch a subplot tool window for *targetfig* (default gcf).
A :class:`matplotlib.widgets.SubplotTool` instance is returned.
"""
tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
rcParams['toolbar'] = 'None'
if targetfig is None:
manager = get_current_fig_manager()
targetfig = manager.canvas.figure
else:
# find the manager for this figure
for manager in _pylab_helpers.Gcf._activeQue:
if manager.canvas.figure==targetfig: break
else: raise RuntimeError('Could not find manager for targetfig')
toolfig = figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
ret = SubplotTool(targetfig, toolfig)
rcParams['toolbar'] = tbar
_pylab_helpers.Gcf.set_active(manager) # restore the current figure
return ret
def box(on=None):
"""
Turn the axes box on or off according to *on*.
If *on* is *None*, toggle state.
"""
ax = gca()
if on is None:
on = not ax.get_frame_on()
ax.set_frame_on(on)
draw_if_interactive()
def title(s, *args, **kwargs):
"""
Set the title of the current axis to *s*.
Default font override is::
override = {'fontsize': 'medium',
'verticalalignment': 'bottom',
'horizontalalignment': 'center'}
.. seealso::
:func:`~matplotlib.pyplot.text`:
for information on how override and the optional args work.
"""
l = gca().set_title(s, *args, **kwargs)
draw_if_interactive()
return l
## Axis ##
def axis(*v, **kwargs):
"""
Set/Get the axis properties:
>>> axis()
returns the current axes limits ``[xmin, xmax, ymin, ymax]``.
>>> axis(v)
sets the min and max of the x and y axes, with
``v = [xmin, xmax, ymin, ymax]``.
>>> axis('off')
turns off the axis lines and labels.
>>> axis('equal')
changes limits of *x* or *y* axis so that equal increments of *x*
and *y* have the same length; a circle is circular.
>>> axis('scaled')
achieves the same result by changing the dimensions of the plot box instead
of the axis data limits.
>>> axis('tight')
changes *x* and *y* axis limits such that all data is shown. If
all data is already shown, it will move it to the center of the
figure without modifying (*xmax* - *xmin*) or (*ymax* -
    *ymin*). Note this is slightly different from MATLAB.
>>> axis('image')
is 'scaled' with the axis limits equal to the data limits.
>>> axis('auto')
and
>>> axis('normal')
are deprecated. They restore default behavior; axis limits are automatically
scaled to make the data fit comfortably within the plot box.
if ``len(*v)==0``, you can pass in *xmin*, *xmax*, *ymin*, *ymax*
as kwargs selectively to alter just those limits without changing
the others.
The xmin, xmax, ymin, ymax tuple is returned
.. seealso::
:func:`xlim`, :func:`ylim`
"""
ax = gca()
v = ax.axis(*v, **kwargs)
draw_if_interactive()
return v
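# Illustrative sketch (assumption): the main calling forms of axis().
def _example_axis_forms():
    plot([0, 1, 4, 9])
    limits = axis()                # -> [xmin, xmax, ymin, ymax]
    axis([0, 3, 0, 10])            # set all four limits at once
    axis(xmin=-1)                  # kwargs adjust individual limits only
    axis('tight')                  # fit the limits to the data
    axis('off')                    # hide the axis lines and labels
    return limits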
def xlabel(s, *args, **kwargs):
"""
Set the *x* axis label of the current axis to *s*
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'top',
'horizontalalignment' : 'center'
}
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args work
"""
l = gca().set_xlabel(s, *args, **kwargs)
draw_if_interactive()
return l
def ylabel(s, *args, **kwargs):
"""
Set the *y* axis label of the current axis to *s*.
    Default override is::
        override = {
           'fontsize'            : 'small',
           'verticalalignment'   : 'center',
           'horizontalalignment' : 'right',
           'rotation'            : 'vertical'
        }
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args
work.
"""
l = gca().set_ylabel(s, *args, **kwargs)
draw_if_interactive()
return l
def xlim(*args, **kwargs):
"""
Set/Get the xlimits of the current axes::
xmin, xmax = xlim() # return the current xlim
xlim( (xmin, xmax) ) # set the xlim to xmin, xmax
xlim( xmin, xmax ) # set the xlim to xmin, xmax
If you do not specify args, you can pass the xmin and xmax as
kwargs, eg.::
xlim(xmax=3) # adjust the max leaving min unchanged
xlim(xmin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_xlim(*args, **kwargs)
draw_if_interactive()
return ret
def ylim(*args, **kwargs):
"""
Set/Get the ylimits of the current axes::
ymin, ymax = ylim() # return the current ylim
ylim( (ymin, ymax) ) # set the ylim to ymin, ymax
ylim( ymin, ymax ) # set the ylim to ymin, ymax
If you do not specify args, you can pass the *ymin* and *ymax* as
kwargs, eg.::
ylim(ymax=3) # adjust the max leaving min unchanged
ylim(ymin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_ylim(*args, **kwargs)
draw_if_interactive()
return ret
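# Illustrative sketch (assumption): the symmetric get/set forms of xlim() and
# ylim() documented above.
def _example_limits():
    plot([1, 2, 3], [1, 4, 9])
    xmin, xmax = xlim()        # query the current x limits
    xlim(0, 4)                 # positional set
    ylim((0, 10))              # tuple set
    ylim(ymax=12)              # keyword tweak; ymin is left unchanged
    return xmin, xmax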
def xscale(*args, **kwargs):
"""
call signature::
xscale(scale, **kwargs)
Set the scaling for the x-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_xscale(*args, **kwargs)
draw_if_interactive()
return ret
xscale.__doc__ = dedent(xscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
def yscale(*args, **kwargs):
"""
call signature::
      yscale(scale, **kwargs)
Set the scaling for the y-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_yscale(*args, **kwargs)
draw_if_interactive()
return ret
yscale.__doc__ = dedent(yscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
def xticks(*args, **kwargs):
"""
    Set/Get the locations and labels of the x-axis ticks::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = xticks()
# set the locations of the xticks
xticks( arange(6) )
# set the locations and labels of the xticks
xticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_xticks()
labels = ax.get_xticklabels()
elif len(args)==1:
locs = ax.set_xticks(args[0])
labels = ax.get_xticklabels()
elif len(args)==2:
locs = ax.set_xticks(args[0])
labels = ax.set_xticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to xticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return locs, silent_list('Text xticklabel', labels)
def yticks(*args, **kwargs):
"""
    Set/Get the locations and labels of the y-axis ticks::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = yticks()
# set the locations of the yticks
yticks( arange(6) )
# set the locations and labels of the yticks
yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_yticks()
labels = ax.get_yticklabels()
elif len(args)==1:
locs = ax.set_yticks(args[0])
labels = ax.get_yticklabels()
elif len(args)==2:
locs = ax.set_yticks(args[0])
labels = ax.set_yticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to yticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return ( locs,
silent_list('Text yticklabel', labels)
)
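# Illustrative sketch (assumption): setting tick locations and labels, with
# extra keyword arguments applied to the Text labels.
def _example_ticks():
    plot(range(5), [v ** 2 for v in range(5)])
    locs, labels = xticks()                      # query current ticks
    xticks(range(5), ('a', 'b', 'c', 'd', 'e'),
           rotation=45)                          # Text kwargs style the labels
    yticks((0, 8, 16))
    return locs, labels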
def rgrids(*args, **kwargs):
"""
Set/Get the radial locations of the gridlines and ticklabels on a
polar plot.
call signatures::
lines, labels = rgrids()
lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)
    When called with no arguments, :func:`rgrids` simply returns the
tuple (*lines*, *labels*), where *lines* is an array of radial
gridlines (:class:`~matplotlib.lines.Line2D` instances) and
*labels* is an array of tick labels
(:class:`~matplotlib.text.Text` instances). When called with
arguments, the labels will appear at the specified radial
distances and angles.
    *labels*, if not *None*, is a len(*radii*) list of strings of the
    labels to use at each radius.
If *labels* is None, the rformatter will be used
Examples::
# set the locations of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0) )
# set the locations and labels of the radial gridlines and labels
        lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('rgrids only defined for polar axes')
if len(args)==0:
lines = ax.yaxis.get_ticklines()
labels = ax.yaxis.get_ticklabels()
else:
lines, labels = ax.set_rgrids(*args, **kwargs)
draw_if_interactive()
return ( silent_list('Line2D rgridline', lines),
silent_list('Text rgridlabel', labels) )
def thetagrids(*args, **kwargs):
"""
Set/Get the theta locations of the gridlines and ticklabels.
If no arguments are passed, return a tuple (*lines*, *labels*)
where *lines* is an array of radial gridlines
(:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
array of tick labels (:class:`~matplotlib.text.Text` instances)::
lines, labels = thetagrids()
Otherwise the syntax is::
lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)
set the angles at which to place the theta grids (these gridlines
are equal along the theta dimension).
*angles* is in degrees.
*labels*, if not *None*, is a len(angles) list of strings of the
labels to use at each angle.
If *labels* is *None*, the labels will be ``fmt%angle``.
*frac* is the fraction of the polar axes radius at which to place
the label (1 is the edge). Eg. 1.05 is outside the axes and 0.95
is inside the axes.
Return value is a list of tuples (*lines*, *labels*):
- *lines* are :class:`~matplotlib.lines.Line2D` instances
- *labels* are :class:`~matplotlib.text.Text` instances.
Note that on input, the *labels* argument is a list of strings,
and on output it is a list of :class:`~matplotlib.text.Text`
instances.
Examples::
# set the locations of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90) )
# set the locations and labels of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
        raise RuntimeError('thetagrids only defined for polar axes')
if len(args)==0:
lines = ax.xaxis.get_ticklines()
labels = ax.xaxis.get_ticklabels()
else:
lines, labels = ax.set_thetagrids(*args, **kwargs)
draw_if_interactive()
return (silent_list('Line2D thetagridline', lines),
silent_list('Text thetagridlabel', labels)
)
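# Illustrative sketch (assumption): labelling a polar plot with rgrids() and
# thetagrids(), following the examples in the docstrings above.
def _example_polar_grids():
    import numpy as np
    theta = np.linspace(0, 2 * np.pi, 100)
    polar(theta, 1 + 0.5 * np.sin(3 * theta))    # creates/uses a polar axes
    rgrids((0.5, 1.0, 1.5))                      # radial gridline positions
    thetagrids(range(0, 360, 90), ('E', 'N', 'W', 'S'))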
## Plotting Info ##
def plotting():
"""
Plotting commands
=============== =========================================================
Command Description
=============== =========================================================
axes Create a new axes
axis Set or return the current axis limits
bar make a bar chart
boxplot make a box and whiskers chart
cla clear current axes
clabel label a contour plot
clf clear a figure window
close close a figure window
colorbar add a colorbar to the current figure
cohere make a plot of coherence
contour make a contour plot
contourf make a filled contour plot
csd make a plot of cross spectral density
draw force a redraw of the current figure
errorbar make an errorbar graph
figlegend add a legend to the figure
figimage add an image to the figure, w/o resampling
figtext add text in figure coords
figure create or change active figure
fill make filled polygons
fill_between make filled polygons
gca return the current axes
gcf return the current figure
gci get the current image, or None
getp get a handle graphics property
hist make a histogram
hold set the hold state on current axes
legend add a legend to the axes
loglog a log log plot
imread load image file into array
imshow plot image data
matshow display a matrix in a new figure preserving aspect
pcolor make a pseudocolor plot
plot make a line plot
plotfile plot data from a flat file
psd make a plot of power spectral density
quiver make a direction field (arrows) plot
rc control the default params
savefig save the current figure
scatter make a scatter plot
setp set a handle graphics property
semilogx log x axis
semilogy log y axis
show show the figures
specgram a spectrogram plot
stem make a stem plot
subplot make a subplot (numrows, numcols, axesnum)
table add a table to the axes
text add some text at location x,y to the current axes
title add a title to the current axes
xlabel add an xlabel to the current axes
ylabel add a ylabel to the current axes
=============== =========================================================
The following commands will set the default colormap accordingly:
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
"""
pass
def get_plot_commands():
    return ('axes', 'axis', 'bar', 'boxplot', 'cla', 'clf',
            'close', 'colorbar', 'cohere', 'csd', 'draw', 'errorbar',
            'figlegend', 'figtext', 'figimage', 'figure', 'fill', 'gca',
            'gcf', 'gci', 'get', 'gray', 'barh', 'jet', 'hist', 'hold', 'imread',
            'imshow', 'legend', 'loglog', 'quiver', 'rc', 'pcolor', 'pcolormesh',
            'plot', 'psd', 'savefig', 'scatter', 'set', 'semilogx', 'semilogy',
            'show', 'specgram', 'stem', 'subplot', 'table', 'text', 'title',
            'xlabel', 'ylabel', 'pie', 'polar')
def colors():
"""
This is a do nothing function to provide you with help on how
matplotlib handles colors.
Commands which take color arguments can use several formats to
specify the colors. For the basic builtin colors, you can use a
single letter
===== =======
Alias Color
===== =======
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
===== =======
For a greater range of colors, you have two options. You can
specify the color using an html hex string, as in::
color = '#eeefff'
or you can pass an R,G,B tuple, where each of R,G,B are in the
range [0,1].
You can also use any legal html name for a color, for example::
color = 'red',
color = 'burlywood'
color = 'chartreuse'
    The example below creates a subplot with a dark
    slate gray background::
        subplot(111, axisbg=(0.1843, 0.3098, 0.3098))
    Here is an example that creates a pale turquoise title::
title('Is this the best color?', color='#afeeee')
"""
pass
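# Illustrative sketch (assumption): the color formats listed in colors(),
# passed to ordinary plotting commands.
def _example_color_formats():
    plot([1, 2, 3], color='g')                       # single-letter alias
    plot([3, 2, 1], color='#eeefff')                 # html hex string
    plot([2, 2, 2], color=(0.1843, 0.3098, 0.3098))  # R,G,B tuple in [0,1]
    title('Is this the best color?', color='#afeeee')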
def colormaps():
"""
matplotlib provides the following colormaps.
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
You can set the colormap for an image, pcolor, scatter, etc,
either as a keyword argument::
imshow(X, cmap=cm.hot)
or post-hoc using the corresponding pylab interface function::
imshow(X)
hot()
jet()
In interactive mode, this will update the colormap allowing you to
see which one works best for your data.
"""
pass
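# Illustrative sketch (assumption): choosing a colormap up front versus
# swapping it on the current image afterwards, as described in colormaps().
def _example_colormap_switch():
    import numpy as np
    imshow(np.random.rand(10, 10), cmap=cm.hot)  # explicit colormap
    gray()                                       # post-hoc switch on the current image
    colorbar()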
## Plotting part 1: manually generated functions and wrappers ##
from matplotlib.colorbar import colorbar_doc
def colorbar(mappable=None, cax=None, ax=None, **kw):
if mappable is None:
mappable = gci()
if ax is None:
ax = gca()
ret = gcf().colorbar(mappable, cax = cax, ax=ax, **kw)
draw_if_interactive()
return ret
colorbar.__doc__ = colorbar_doc
def clim(vmin=None, vmax=None):
"""
Set the color limits of the current image
To apply clim to all axes images do::
clim(0, 0.5)
If either *vmin* or *vmax* is None, the image min/max respectively
will be used for color scaling.
If you want to set the clim of multiple images,
use, for example::
for im in gca().get_images():
im.set_clim(0, 0.05)
"""
im = gci()
if im is None:
raise RuntimeError('You must first define an image, eg with imshow')
im.set_clim(vmin, vmax)
draw_if_interactive()
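# Illustrative sketch (assumption): rescaling the color limits of the most
# recently created image, per the clim() docstring above.
def _example_clim():
    import numpy as np
    imshow(np.random.rand(20, 20))
    clim(0.2, 0.8)             # map the full colormap onto the [0.2, 0.8] range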
def imread(*args, **kwargs):
return _imread(*args, **kwargs)
if _imread.__doc__ is not None:
imread.__doc__ = dedent(_imread.__doc__)
def matshow(A, fignum=None, **kw):
"""
Display an array as a matrix in a new figure window.
The origin is set at the upper left hand corner and rows (first
dimension of the array) are displayed horizontally. The aspect
ratio of the figure window is that of the array, unless this would
make an excessively short or narrow figure.
Tick labels for the xaxis are placed on top.
With the exception of fignum, keyword arguments are passed to
:func:`~matplotlib.pyplot.imshow`.
*fignum*: [ None | integer | False ]
By default, :func:`matshow` creates a new figure window with
automatic numbering. If *fignum* is given as an integer, the
created figure will use this figure number. Because of how
:func:`matshow` tries to set the figure aspect ratio to be the
one of the array, if you provide the number of an already
existing figure, strange things may happen.
If *fignum* is *False* or 0, a new figure window will **NOT** be created.
"""
    if fignum is False or fignum == 0:
ax = gca()
else:
# Extract actual aspect ratio of array and make appropriately sized figure
fig = figure(fignum, figsize=figaspect(A))
ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
im = ax.matshow(A, **kw)
gci._current = im
draw_if_interactive()
return im
def polar(*args, **kwargs):
"""
call signature::
polar(theta, r, **kwargs)
Make a polar plot. Multiple *theta*, *r* arguments are supported,
with format strings, as in :func:`~matplotlib.pyplot.plot`.
"""
ax = gca(polar=True)
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
return ret
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',',
**kwargs):
"""
Plot the data in *fname*
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
vector as you use in the *plotfuncs* dictionary, eg., integer
column numbers in both or column names in both.
*comments*, *skiprows*, *checkrows*, and *delimiter* are all passed on to
:func:`matplotlib.pylab.csv2rec` to load the data into a record array.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'), plotfuncs={'volume': 'semilogy'})
"""
fig = figure()
if len(cols)<1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = dict()
r = mlab.csv2rec(fname, comments=comments,
skiprows=skiprows, checkrows=checkrows, delimiter=delimiter)
def getname_val(identifier):
'return the name and column data for identifier'
if is_string_like(identifier):
return identifier, r[identifier]
elif is_numlike(identifier):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
if len(cols)==1:
ax1 = fig.add_subplot(1,1,1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_xlabel(xname)
else:
N = len(cols)
for i in range(1,N):
if i==1:
ax = ax1 = fig.add_subplot(N-1,1,i)
ax.grid(True)
else:
ax = fig.add_subplot(N-1,1,i, sharex=ax1)
ax.grid(True)
yname, y = getname_val(cols[i])
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if xname=='date':
fig.autofmt_xdate()
draw_if_interactive()
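# Illustrative sketch (assumption: 'prices.csv' is a hypothetical CSV file
# with 'date', 'volume' and 'adj_close' columns): the two column-identifier
# styles accepted by plotfile().
def _example_plotfile():
    plotfile('prices.csv', (0, 1, 3))                      # integer column indexes
    plotfile('prices.csv', ('date', 'volume', 'adj_close'),
             plotfuncs={'volume': 'semilogy'})             # column names + plot type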
## Plotting part 2: autogenerated wrappers for axes methods ##
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def acorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().acorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.acorr.__doc__ is not None:
acorr.__doc__ = dedent(Axes.acorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
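# Illustrative sketch (assumption): every autogenerated wrapper below accepts
# a hold=True|False keyword that overrides the axes' hold state for that one
# call and then restores it, exactly as acorr() above does.
def _example_hold_override():
    plot([0, 1, 2, 3])
    plot([3, 2, 1, 0], hold=True)    # added to the same axes regardless of state
    plot([1, 1, 1, 1], hold=False)   # axes are cleared first, then this is drawn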
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def arrow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().arrow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.arrow.__doc__ is not None:
arrow.__doc__ = dedent(Axes.arrow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhline.__doc__ is not None:
axhline.__doc__ = dedent(Axes.axhline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhspan.__doc__ is not None:
axhspan.__doc__ = dedent(Axes.axhspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvline.__doc__ is not None:
axvline.__doc__ = dedent(Axes.axvline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvspan.__doc__ is not None:
axvspan.__doc__ = dedent(Axes.axvspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().bar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.bar.__doc__ is not None:
bar.__doc__ = dedent(Axes.bar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barh.__doc__ is not None:
barh.__doc__ = dedent(Axes.barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def broken_barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().broken_barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.broken_barh.__doc__ is not None:
broken_barh.__doc__ = dedent(Axes.broken_barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def boxplot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().boxplot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.boxplot.__doc__ is not None:
boxplot.__doc__ = dedent(Axes.boxplot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cohere(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().cohere(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.cohere.__doc__ is not None:
cohere.__doc__ = dedent(Axes.cohere.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def clabel(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().clabel(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.clabel.__doc__ is not None:
clabel.__doc__ = dedent(Axes.clabel.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contour(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contour(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contour.__doc__ is not None:
contour.__doc__ = dedent(Axes.contour.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contourf(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contourf(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contourf.__doc__ is not None:
contourf.__doc__ = dedent(Axes.contourf.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def csd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().csd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.csd.__doc__ is not None:
csd.__doc__ = dedent(Axes.csd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def errorbar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().errorbar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.errorbar.__doc__ is not None:
errorbar.__doc__ = dedent(Axes.errorbar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill.__doc__ is not None:
fill.__doc__ = dedent(Axes.fill.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill_between(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill_between(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill_between.__doc__ is not None:
fill_between.__doc__ = dedent(Axes.fill_between.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hexbin(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hexbin(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.hexbin.__doc__ is not None:
hexbin.__doc__ = dedent(Axes.hexbin.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hist(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hist(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hist.__doc__ is not None:
hist.__doc__ = dedent(Axes.hist.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hlines.__doc__ is not None:
hlines.__doc__ = dedent(Axes.hlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def imshow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().imshow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.imshow.__doc__ is not None:
imshow.__doc__ = dedent(Axes.imshow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def loglog(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().loglog(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.loglog.__doc__ is not None:
loglog.__doc__ = dedent(Axes.loglog.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolor(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolor(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolor.__doc__ is not None:
pcolor.__doc__ = dedent(Axes.pcolor.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolormesh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolormesh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolormesh.__doc__ is not None:
pcolormesh.__doc__ = dedent(Axes.pcolormesh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pie(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pie(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.pie.__doc__ is not None:
pie.__doc__ = dedent(Axes.pie.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot.__doc__ is not None:
plot.__doc__ = dedent(Axes.plot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot_date(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot_date(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot_date.__doc__ is not None:
plot_date.__doc__ = dedent(Axes.plot_date.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def psd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().psd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.psd.__doc__ is not None:
psd.__doc__ = dedent(Axes.psd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiver(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiver(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.quiver.__doc__ is not None:
quiver.__doc__ = dedent(Axes.quiver.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiverkey(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiverkey(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.quiverkey.__doc__ is not None:
quiverkey.__doc__ = dedent(Axes.quiverkey.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def scatter(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().scatter(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.scatter.__doc__ is not None:
scatter.__doc__ = dedent(Axes.scatter.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogx(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogx(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogx.__doc__ is not None:
semilogx.__doc__ = dedent(Axes.semilogx.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogy.__doc__ is not None:
semilogy.__doc__ = dedent(Axes.semilogy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def specgram(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().specgram(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret[-1]
hold(b)
return ret
if Axes.specgram.__doc__ is not None:
specgram.__doc__ = dedent(Axes.specgram.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().spy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.spy.__doc__ is not None:
spy.__doc__ = dedent(Axes.spy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def stem(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().stem(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.stem.__doc__ is not None:
stem.__doc__ = dedent(Axes.stem.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def step(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().step(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.step.__doc__ is not None:
step.__doc__ = dedent(Axes.step.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def vlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().vlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.vlines.__doc__ is not None:
vlines.__doc__ = dedent(Axes.vlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def xcorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().xcorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.xcorr.__doc__ is not None:
xcorr.__doc__ = dedent(Axes.xcorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barbs(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barbs(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barbs.__doc__ is not None:
barbs.__doc__ = dedent(Axes.barbs.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cla(*args, **kwargs):
ret = gca().cla(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.cla.__doc__ is not None:
cla.__doc__ = dedent(Axes.cla.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def grid(*args, **kwargs):
ret = gca().grid(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.grid.__doc__ is not None:
grid.__doc__ = dedent(Axes.grid.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def legend(*args, **kwargs):
ret = gca().legend(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.legend.__doc__ is not None:
legend.__doc__ = dedent(Axes.legend.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def table(*args, **kwargs):
ret = gca().table(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.table.__doc__ is not None:
table.__doc__ = dedent(Axes.table.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def text(*args, **kwargs):
ret = gca().text(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.text.__doc__ is not None:
text.__doc__ = dedent(Axes.text.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def annotate(*args, **kwargs):
ret = gca().annotate(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.annotate.__doc__ is not None:
annotate.__doc__ = dedent(Axes.annotate.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def autumn():
'''
set the default colormap to autumn and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='autumn')
im = gci()
if im is not None:
im.set_cmap(cm.autumn)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bone():
'''
set the default colormap to bone and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='bone')
im = gci()
if im is not None:
im.set_cmap(cm.bone)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cool():
'''
set the default colormap to cool and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='cool')
im = gci()
if im is not None:
im.set_cmap(cm.cool)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def copper():
'''
set the default colormap to copper and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='copper')
im = gci()
if im is not None:
im.set_cmap(cm.copper)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def flag():
'''
set the default colormap to flag and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='flag')
im = gci()
if im is not None:
im.set_cmap(cm.flag)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def gray():
'''
set the default colormap to gray and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='gray')
im = gci()
if im is not None:
im.set_cmap(cm.gray)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hot():
'''
set the default colormap to hot and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hot')
im = gci()
if im is not None:
im.set_cmap(cm.hot)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hsv():
'''
set the default colormap to hsv and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hsv')
im = gci()
if im is not None:
im.set_cmap(cm.hsv)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def jet():
'''
set the default colormap to jet and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='jet')
im = gci()
if im is not None:
im.set_cmap(cm.jet)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pink():
'''
set the default colormap to pink and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='pink')
im = gci()
if im is not None:
im.set_cmap(cm.pink)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def prism():
'''
set the default colormap to prism and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='prism')
im = gci()
if im is not None:
im.set_cmap(cm.prism)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spring():
'''
set the default colormap to spring and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spring')
im = gci()
if im is not None:
im.set_cmap(cm.spring)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def summer():
'''
set the default colormap to summer and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='summer')
im = gci()
if im is not None:
im.set_cmap(cm.summer)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def winter():
'''
set the default colormap to winter and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='winter')
im = gci()
if im is not None:
im.set_cmap(cm.winter)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spectral():
'''
set the default colormap to spectral and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spectral')
im = gci()
if im is not None:
im.set_cmap(cm.spectral)
draw_if_interactive()
|
gpl-3.0
|
equialgo/scikit-learn
|
examples/linear_model/plot_robust_fit.py
|
147
|
3050
|
"""
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
- The scores of HuberRegressor may not be compared directly to TheilSen
  and RANSAC because it does not attempt to completely filter out the outliers
  but only to lessen their effect.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn.linear_model import (
LinearRegression, TheilSenRegressor, RANSACRegressor, HuberRegressor)
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)),
('HuberRegressor', HuberRegressor())]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen', 'HuberRegressor': 'black'}
linestyle = {'OLS': '-', 'Theil-Sen': '-.', 'RANSAC': '--', 'HuberRegressor': '--'}
lw = 3
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling Errors Only', X, y),
('Corrupt X, Small Deviants', X_errors, y),
('Corrupt y, Small Deviants', X, y_errors),
('Corrupt X, Large Deviants', X_errors_large, y),
('Corrupt y, Large Deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'b+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot, color=colors[name], linestyle=linestyle[name],
linewidth=lw, label='%s: error = %.3f' % (name, mse))
legend_title = 'Error of Mean\nAbsolute Deviation\nto Non-corrupt Data'
legend = plt.legend(loc='upper right', frameon=False, title=legend_title,
prop=dict(size='x-small'))
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
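# Illustrative sketch (not part of the original example): RANSACRegressor also
# exposes the inlier mask it estimated, which can be inspected after fitting.
def inspect_ransac_inliers():
    ransac = RANSACRegressor(random_state=42)
    ransac.fit(X_errors_large, y)
    n_inliers = ransac.inlier_mask_.sum()
    print("RANSAC kept %d of %d samples as inliers" % (n_inliers, len(y)))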
|
bsd-3-clause
|
JeffreyFish/DocWebTool
|
InternalAuditSampling.py
|
2
|
16411
|
#!/usr/bin/env python
# -*- Coding: UTF-8 -*-
#------------------------------------
#--Author: Jeffrey Yu
#--CreationDate: 2017/12/18 17:51
#--RevisedDate: 2018/01/15
#------------------------------------
import datetime
import random
import pyodbc
import pandas as pd
import common
def convert_id(x):
x = str(x)
id_list = []
ids1 = x.split('\n')
for line in ids1:
line = line.strip('\n')
line = line.strip('\r')
id_list.append(line)
return id_list
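# Illustrative note (not part of the original script): convert_id turns the
# newline-separated SecId text pasted into the tool into a plain Python list,
# e.g. convert_id('F000001\r\nF000002') -> ['F000001', 'F000002'].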
def run(month_diff, Last_Checked_SecId):
try:
print('Starting...\n')
last_check_secid_list = convert_id(Last_Checked_SecId)
connection_string = 'Driver={SQL Server};Server=dcdrdb601\dminputdb;Database=DocumentAcquisition;Uid=xxx;Pwd=xxx;Trusted_Domain=msdomain1;Trusted_Connection=yes;MARS_Connection=yes;'
connection = pyodbc.connect(connection_string)
current_month = datetime.datetime.now().month
# if current_month <= 6:
        # # For months 1-6, take the previous month's SecIds as the pool
# month_diff = 1
# else:
        # # For months 7-12, take the SecIds from two months earlier as the pool
# month_diff = 2
# print(
# 'The month is %d, it is sampling the SecIds from %d month before...\n'
# % (current_month, month_diff))
# month_diff = 1
# month_diff = int(input('Please input you want to sample the SecIds how many months before, then press Enter:'))
sql_code_secid_pool = '''
select distinct ss.SecId,count(distinct mp.ProcessId) as [DocNum]
from SecurityData..SecuritySearch ss
left join DocumentAcquisition..SECCurrentDocument cd on cd.InvestmentId=ss.SecId
left join DocumentAcquisition..MasterProcess mp on mp.DocumentId=cd.DocumentId
where cd.UpdateDate>=cast(convert(char(10),dateadd(dd,-day(dateadd(month,-%d,getdate()))+1,dateadd(month,-%d,getdate())),120) as datetime)
and cd.UpdateDate<cast(convert(char(10),dateadd(dd,-day(getdate())+1,getdate()),120) as datetime)
and ss.Universe in (
'/OEIC/',
'/OEIC/529/',
'/OEIC/ETF/',
'/OEIC/ETMF/',
'/OEIC/HF/',
'/OEIC/Ins/',
'/OEIC/Ins/Pen/',
'/OEIC/Ins/Ret/',
'/OEIC/MM/',
'/OEIC/MM/Ins/',
'/OEIC/MM/Ins/Pen/',
'/OEIC/MM/Ins/Ret/',
'/OEIC/MM/Pen/',
'/OEIC/MM/Ret/',
'/OEIC/Pen/',
'/OEIC/Ret/',
'/CEIC/',
'/CEIC/ETF/',
'/CEIC/HF/'
)
and ss.Status=1
and ss.CountryId='USA'
and mp.Status=10
and mp.Category=1
and mp.Format!='PDF'
and mp.CreationDate>=cast(convert(char(10),dateadd(dd,-day(dateadd(month,-%d,getdate()))+1,dateadd(month,-%d,getdate())),120) as datetime)
and mp.CreationDate<cast(convert(char(10),dateadd(dd,-day(getdate())+1,getdate()),120) as datetime)
group by ss.SecId
order by count(distinct mp.ProcessId) desc
'''
cursor = connection.cursor()
target_all_secid_list = [
row[0]
for row in cursor.execute(sql_code_secid_pool % (
month_diff, month_diff, month_diff, month_diff)).fetchall()
]
cursor.close()
        # Get the SecIds that were checked last time
# if os.path.isfile(cur_file_dir() + '\\Last_Checked_SecId.txt'):
# with open(
# cur_file_dir() + '\\Last_Checked_SecId.txt',
# 'r',
# encoding='UTF-8') as r:
# last_check_secid_list = list(
# set([
# each
# for each in [
# line.strip('\n') for line in r.readlines()
# ] if len(each)
# ]))
# else:
# error = input(
# 'Cannot find the "Last_Checked_SecId.txt", please add this file, press any button to exit...'
# )
# sys.exit()
        # Remove the previously checked SecIds from this round's sample pool
target_all_secid_no_last_check = [
secid for secid in target_all_secid_list
if secid not in last_check_secid_list
]
        # From the pool, take the 50 SecIds with the most mapped Pros, SAI, SP and Supplement documents
target_secid_list_string = str(target_all_secid_no_last_check).replace('[', '').replace(']', '')
sql_code_pros_sai_sp = '''
select distinct top 50 ss.SecId,count(distinct mp.ProcessId) as [DocNum]
from SecurityData..SecuritySearch ss
left join DocumentAcquisition..SECCurrentDocument cd on cd.InvestmentId=ss.SecId
left join DocumentAcquisition..MasterProcess mp on mp.DocumentId=cd.DocumentId
where cd.UpdateDate>=cast(convert(char(10),dateadd(dd,-day(dateadd(month,-%d,getdate()))+1,dateadd(month,-%d,getdate())),120) as datetime)
and cd.UpdateDate<cast(convert(char(10),dateadd(dd,-day(getdate())+1,getdate()),120) as datetime)
and ss.Universe in (
'/OEIC/',
'/OEIC/529/',
'/OEIC/ETF/',
'/OEIC/ETMF/',
'/OEIC/HF/',
'/OEIC/Ins/',
'/OEIC/Ins/Pen/',
'/OEIC/Ins/Ret/',
'/OEIC/MM/',
'/OEIC/MM/Ins/',
'/OEIC/MM/Ins/Pen/',
'/OEIC/MM/Ins/Ret/',
'/OEIC/MM/Pen/',
'/OEIC/MM/Ret/',
'/OEIC/Pen/',
'/OEIC/Ret/',
'/CEIC/',
'/CEIC/ETF/',
'/CEIC/HF/'
)
and ss.Status=1
and ss.CountryId='USA'
and mp.Status=10
and mp.Category=1
and mp.Format!='PDF'
and mp.DocumentType in (1,2,3,15,17,60)
and mp.CreationDate>=cast(convert(char(10),dateadd(dd,-day(dateadd(month,-%d,getdate()))+1,dateadd(month,-%d,getdate())),120) as datetime)
and mp.CreationDate<cast(convert(char(10),dateadd(dd,-day(getdate())+1,getdate()),120) as datetime)
and ss.SecId in (%s)
group by ss.SecId
order by count(distinct mp.ProcessId) desc
'''
cursor = connection.cursor()
pros_secid_list_50 = [
row[0]
for row in cursor.execute(sql_code_pros_sai_sp % (
month_diff, month_diff, month_diff, month_diff, target_secid_list_string)).fetchall()
]
        # Randomly sample 20 of the 50 SecIds
pros_secid_list = random.sample(pros_secid_list_50, 20)
target_all_secid_no_last_check_1 = [
secid for secid in target_all_secid_no_last_check
if secid not in pros_secid_list
]
        # From the pool, take the 50 SecIds with the most mapped AR documents
if len(pros_secid_list) < 20:
ar_num = (20 - len(pros_secid_list)) + 50
else:
ar_num = 50
target_secid_list_string = str(target_all_secid_no_last_check_1).replace('[', '').replace(']', '')
sql_code_ar_sar = '''
select distinct top %d ss.SecId,count(distinct mp.ProcessId) as [DocNum]
from SecurityData..SecuritySearch ss
left join DocumentAcquisition..SECCurrentDocument cd on cd.InvestmentId=ss.SecId
left join DocumentAcquisition..MasterProcess mp on mp.DocumentId=cd.DocumentId
where cd.UpdateDate>=cast(convert(char(10),dateadd(dd,-day(dateadd(month,-%d,getdate()))+1,dateadd(month,-%d,getdate())),120) as datetime)
and cd.UpdateDate<cast(convert(char(10),dateadd(dd,-day(getdate())+1,getdate()),120) as datetime)
and ss.Universe in (
'/OEIC/',
'/OEIC/529/',
'/OEIC/ETF/',
'/OEIC/ETMF/',
'/OEIC/HF/',
'/OEIC/Ins/',
'/OEIC/Ins/Pen/',
'/OEIC/Ins/Ret/',
'/OEIC/MM/',
'/OEIC/MM/Ins/',
'/OEIC/MM/Ins/Pen/',
'/OEIC/MM/Ins/Ret/',
'/OEIC/MM/Pen/',
'/OEIC/MM/Ret/',
'/OEIC/Pen/',
'/OEIC/Ret/',
'/CEIC/',
'/CEIC/ETF/',
'/CEIC/HF/'
)
and ss.Status=1
and ss.CountryId='USA'
and mp.Status=10
and mp.Category=1
and mp.Format!='PDF'
and mp.DocumentType=4
and mp.CreationDate>=cast(convert(char(10),dateadd(dd,-day(dateadd(month,-%d,getdate()))+1,dateadd(month,-%d,getdate())),120) as datetime)
and mp.CreationDate<cast(convert(char(10),dateadd(dd,-day(getdate())+1,getdate()),120) as datetime)
and ss.SecId in (%s)
group by ss.SecId
order by count(distinct mp.ProcessId) desc
'''
cursor = connection.cursor()
AR_secid_list_50 = [
row[0]
for row in cursor.execute(sql_code_ar_sar % (
ar_num, month_diff, month_diff, month_diff, month_diff, target_secid_list_string)).fetchall()
]
        # Randomly sample 20 of the 50 SecIds
AR_secid_list = random.sample(AR_secid_list_50, 20)
target_all_secid_no_last_check_2 = [
secid for secid in target_all_secid_no_last_check_1
if secid not in AR_secid_list
]
        # From the pool, take the 50 SecIds with the most mapped SAR documents
if len(AR_secid_list) + len(pros_secid_list) < 40:
sup_num = (40 - (len(AR_secid_list) + len(pros_secid_list))) + 50
else:
sup_num = 50
target_secid_list_string = str(target_all_secid_no_last_check_2).replace('[', '').replace(']', '')
sql_code_sup = '''
select distinct top %d ss.SecId,count(distinct mp.ProcessId) as [DocNum]
from SecurityData..SecuritySearch ss
left join DocumentAcquisition..SECCurrentDocument cd on cd.InvestmentId=ss.SecId
left join DocumentAcquisition..MasterProcess mp on mp.DocumentId=cd.DocumentId
where cd.UpdateDate>=cast(convert(char(10),dateadd(dd,-day(dateadd(month,-%d,getdate()))+1,dateadd(month,-%d,getdate())),120) as datetime)
and cd.UpdateDate<cast(convert(char(10),dateadd(dd,-day(getdate())+1,getdate()),120) as datetime)
and ss.Universe in (
'/OEIC/',
'/OEIC/529/',
'/OEIC/ETF/',
'/OEIC/ETMF/',
'/OEIC/HF/',
'/OEIC/Ins/',
'/OEIC/Ins/Pen/',
'/OEIC/Ins/Ret/',
'/OEIC/MM/',
'/OEIC/MM/Ins/',
'/OEIC/MM/Ins/Pen/',
'/OEIC/MM/Ins/Ret/',
'/OEIC/MM/Pen/',
'/OEIC/MM/Ret/',
'/OEIC/Pen/',
'/OEIC/Ret/',
'/CEIC/',
'/CEIC/ETF/',
'/CEIC/HF/'
)
and ss.Status=1
and ss.CountryId='USA'
and mp.Status=10
and mp.Category=1
and mp.Format!='PDF'
and mp.DocumentType=5
and mp.CreationDate>=cast(convert(char(10),dateadd(dd,-day(dateadd(month,-%d,getdate()))+1,dateadd(month,-%d,getdate())),120) as datetime)
and mp.CreationDate<cast(convert(char(10),dateadd(dd,-day(getdate())+1,getdate()),120) as datetime)
and ss.SecId in (%s)
group by ss.SecId
order by count(distinct mp.ProcessId) desc
'''
cursor = connection.cursor()
SAR_secid_list_50 = [
row[0]
for row in cursor.execute(sql_code_sup % (
sup_num, month_diff, month_diff, month_diff, month_diff, target_secid_list_string)).fetchall()
]
        # Randomly sample 20 of the 50 SecIds
SAR_secid_list = random.sample(SAR_secid_list_50, 20)
target_secid_list = pros_secid_list + AR_secid_list + SAR_secid_list
target_secid_list_string = str(target_secid_list).replace('[', '').replace(']', '')
        # # Take the 40 SecIds with the most mapped documents
# part1_secid_list = [
# secid for secid in target_all_secid_no_last_check[0:40]
# ]
        # # Randomly take 20 of the remaining SecIds
# part2_secid_list = random.sample(
# [secid for secid in target_all_secid_no_last_check[40:]], 20)
# print('Sampling the documents, please waiting...\n')
# target_secid_list = part1_secid_list + part2_secid_list
# target_secid_list_string = str(target_secid_list).replace('[','').replace(']', '')
        # Pull the current documents for the 60 sampled SecIds
sql_code_get_doc = '''
select ss.SecId,ss.SecurityName,cim.ContractId,ss.Ticker,cs.CIK,ss.FundId,ss.Universe,
sp.Value as [DocType],CONVERT(varchar(10),mp.EffectiveDate,120) as [EffectiveDate],mp.DocumentId,
mp.CreationDate,CONVERT(varchar(10),mp.DocumentDate,120) as [DocumentDate],mp.Format,
[Checker]='',
[Free or Defect]='',
[Comment]='',
[Confirm DA]='',
[DA Confirmation]='',
[Confirmation Comment]=''
from SecurityData..SecuritySearch as ss
left join DocumentAcquisition..ContractIdInvestmentMapping as cim on cim.InvestmentId=ss.SecId
left join SecurityData..FundSearch as fs on fs.FundId=ss.FundId
left join SecurityData..CompanySearch as cs on cs.CompanyId=fs.RegistrantId
left join DocumentAcquisition..SECCurrentDocument as cdi on cdi.InvestmentId=ss.SecId
left join DocumentAcquisition..MasterProcess as mp on cdi.DocumentId=mp.DocumentId
left join DocumentAcquisition..SystemParameter as sp on sp.CodeInt=mp.DocumentType and sp.CategoryId=105
where --mp.CreationDate>=cast(convert(char(10),dateadd(dd,-day(dateadd(month,-%d,getdate()))+1,dateadd(month,-%d,getdate())),120) as datetime)
--and mp.CreationDate<cast(convert(char(10),dateadd(dd,-day(getdate())+1,getdate()),120) as datetime)
--and
ss.SecId in (%s) and mp.Format='HTM'
and mp.DocumentType in (1,2,3,4,5,15,17,60,62)
order by CONVERT(varchar(10),mp.DocumentDate,120) desc
'''
pd_doc_list = pd.read_sql(sql_code_get_doc %
(month_diff, month_diff,
target_secid_list_string), connection)
print('Saving the result Excel file...\n')
excel_file = 'Audit Sample-' + datetime.datetime.now().strftime('%Y%m%d') + '.xlsx'
pd_doc_list.to_excel(common.temp_path + excel_file,
index=False,
encoding='UTF-8')
connection.close()
print('All done! It will be closed in 10 seconds.')
return excel_file
except Exception as e:
print(str(e))
return str(e)
|
gpl-3.0
|
Starkiller4011/tsat
|
tsat/sf/sfprep.py
|
1
|
22753
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
#####################################
# ╔╗ ┬ ┬ ┬┌─┐ ╔╦╗┌─┐┌┬┐ #
# ╠╩╗│ │ │├┤ ║║│ │ │ #
# ╚═╝┴─┘└─┘└─┘ ═╩╝└─┘ ┴ #
# ╔═╗┌─┐┌─┐┌┬┐┬ ┬┌─┐┬─┐┌─┐ #
# ╚═╗│ │├┤ │ │││├─┤├┬┘├┤ #
# ╚═╝└─┘└ ┴ └┴┘┴ ┴┴└─└─┘ #
#####################################
Author: Derek Blue
"""
# Future Imports
from __future__ import division, print_function
# Module Imports
import sys
import os
os.system('cls||echo -e \\\\033c')
print("Importing required modules...\n")
# Numpy
try:
import numpy as np
except ImportError:
print("Module 'numpy' required but not installed.")
print("Try 'pip install numpy' from a terminal.")
sys.exit()
else:
print("numpy: success")
# Pandas
try:
import pandas as pd
except ImportError:
print("Module 'pandas' required but not installed.")
print("Try 'pip install pandas' from a terminal.")
sys.exit()
else:
print("pandas: success")
# Scipy
try:
import scipy as sp
except ImportError:
print("Module 'scipy' required but not installed.")
print("Try 'pip install scipy' from a terminal.")
sys.exit()
else:
print("scipy: success")
# dfgui
try:
from dfgui import show as pdui
except ImportError:
print("Module 'dfgui' required but not installed. Defaulting to terminal display.")
print("Dataframes will only be displeyed as text in the terminal.")
print("For a GUI visualizer for dataframes install dfgui via the following:")
print("git clone https://github.com/bluenote10/PandasDataFrameGUI.git")
print("cd dfgui")
print("pip install -e .")
print("./demo.py")
print("Further documentation can be found here:")
print("https://github.com/bluenote10/PandasDataFrameGUI")
PDUI_PRESENT = False
else:
print("dfgui: success")
PDUI_PRESENT = True
# matplotlib
try:
import matplotlib.pyplot as plt
except ImportError:
print("Module 'matplotlib.pyplot' required but not installed.")
print("Try 'pip install matplotlib' from a terminal.")
sys.exit()
else:
print("matplotlib: success")
# End imports
print("\nModules loaded, setting verbosity...\n")
# Global constants
PDUI_PRESENT = PDUI_PRESENT
FILTERS = ['B', 'COUNTS', 'M2', 'U', 'V', 'W1', 'W2']
MAIN_MENU = __doc__[:-62] + '\nAstroSF: Structure Function Analysis Tool\n\n'
MAIN_MENU += 'Available methods:\n\n1) Load data file\n2) View data table\n'
MAIN_MENU += '3) Polish raw structure function data\n4) Select fitting region\n'
MAIN_MENU += '5) Fit model function to data\n6) Save data to file\n7) Change verbosity\n'
MAIN_MENU += '\n0) Close program\n\n'
# Sub-Processes
def clear():
'''
    Clears the terminal screen and scrollback to present
the user with a nice clean, new screen. Useful for managing
menu screens in terminal applications.
'''
os.system('cls||echo -e \\\\033c')
def enter_cont():
'''
Wait for user to press enter to continue
'''
tmp = raw_input('Press enter to continue...')
def load_sftable(root, filename, verbose):
'''
loads the structure function table
'''
file_path = os.path.join(root, filename)
try:
if verbose:
print('******\nTrying to open data file using tab as delimiter...')
test = np.genfromtxt(file_path, delimiter='\t',
skip_header=0, names=True)
if verbose:
print(
'******\nSuccessfully opened data file using tab as delimiter.\n******\n')
table = pd.read_table(file_path, sep='\t')
except ValueError:
if verbose:
print('******\nData file does not use tab as delimiter, trying comma...')
try:
test = np.genfromtxt(file_path, delimiter=',',
skip_header=0, names=True)
except ValueError:
if verbose:
print(
'******\nData file is not in supported format, exiting...\n******\n')
sys.exit()
if verbose:
print(
'******\nSuccessfully opened data file using comma as delimiter.\n******\n')
table = pd.read_table(file_path, sep=',')
return table
def load_data(verbose):
'''
Load data method from main menu
'''
clear()
if verbose:
print('#####################################')
print('########## Load Data ##########')
print('#####################################\n')
root_loop = True
while root_loop:
data_path = os.path.join(os.getcwd(), 'DATA/TAB/SF')
if verbose:
files = []
data_contents = os.listdir(data_path)
for contents in data_contents:
if os.path.isfile(os.path.join(data_path, contents)):
files.append(str(contents))
print('Data files:\n')
for file in files:
print(' ', file)
print(' ')
file_name = raw_input('Please enter file name: ')
try:
table = load_sftable(data_path, file_name, verbose)
except IOError:
print('Could not load %s' % os.path.join(data_path, file_name))
print('Trying again...')
enter_cont()
else:
if verbose:
                print('File: %s loaded successfully...' %
os.path.join(data_path, file_name))
enter_cont()
return table
def view_table(data_table, verbose):
'''
Displays the currently selected data table
'''
clear()
if verbose:
print('#####################################')
print('########## View Data ##########')
print('#####################################\n')
loopctrl = True
while loopctrl:
ui_term = raw_input(
'Do you wish to view the data in a separate window?[Y|N]: ')
if (ui_term == 'Y') or (ui_term == 'y'):
pdui(data_table)
loopctrl = False
elif (ui_term == 'N') or (ui_term == 'n'):
print(data_table)
loopctrl = False
else:
print("Invalid entry, please use 'Y' or 'y' for yes or 'N' or 'n' for no.")
print('Returning to main menu...')
enter_cont()
def set_verbose(first):
'''
Set verbosity of program
'''
if first:
vloop = True
while vloop:
verbosity = raw_input(
'Do you want the program to run with high verbosity?[Y|N]: ')
if (verbosity == 'y') or (verbosity == 'Y'):
verbose = True
vmode = "High"
vloop = False
elif (verbosity == 'n') or (verbosity == 'N'):
verbose = False
vmode = "Low"
vloop = False
else:
print(
"Invalid entry, please use 'Y' or 'y' for yes or 'N' or 'n' for no.")
print('Verbose mode set to: %s' % vmode)
else:
clear()
print('#####################################')
print('######## Set Verbosity ########')
print('#####################################\n')
vloop = True
while vloop:
verbosity = raw_input(
'Do you want the program to run with high verbosity?[Y|N]: ')
if (verbosity == 'y') or (verbosity == 'Y'):
verbose = True
vmode = "High"
vloop = False
elif (verbosity == 'n') or (verbosity == 'N'):
verbose = False
vmode = "Low"
vloop = False
else:
print(
"Invalid entry, please use 'Y' or 'y' for yes or 'N' or 'n' for no.")
print('Verbose mode set to: %s' % vmode)
print('Returning to main menu...')
enter_cont()
clear()
return verbose
def subset(data_frame, tmin, tmax, verbose):
'''
Return subsetted dataframe within tmin and tmax
'''
if verbose:
print('******\nSubsetting data frame less than %s\n******\n' % tmax)
data_frame = data_frame[data_frame['Tau'] < tmax]
if verbose:
print('******\nSubsetting data frame greater than %s\n******\n' % tmin)
data_frame = data_frame[data_frame['Tau'] > tmin]
if verbose:
print('******\nSubset successful...\n******\n')
return data_frame
def save_data(data_frame, root, file_name, verbose):
'''
Saves the dataframe as csv file
'''
save_path = os.path.join(root, 'FittingTables')
if not os.path.exists(save_path):
if verbose:
print("******\n'FittingTables' folder does not exist, creating...\n******")
os.makedirs(save_path)
if verbose:
print('Successfully created folder\n******\n')
save_path = os.path.join(save_path, file_name)
try:
if verbose:
print('******\nTrying to save to %s\n******' % save_path)
data_frame.to_csv(save_path, sep=',', header=True, index=False)
if verbose:
print('******\nSave successful\n******\n')
except IOError:
print('Unable to save to %s' % save_path)
print('Exiting...')
sys.exit()
def get_bounds(data_frame, with_plot, verbose):
'''
Plots the structure function so user can choose
tmin and tmax values
'''
loopcrtl = True
if with_plot:
if verbose:
print('******\nGenerating plot\n******\n')
plt.plot(data_frame['Tau'], data_frame['SF'], 'k.', ms=0.8)
plt.errorbar(data_frame['Tau'], data_frame['SF'], yerr=data_frame['+-'],
fmt='k.', ms=0.8, capsize=2, elinewidth=0.5)
plt.xscale('log')
plt.yscale('log')
plt.show()
while loopcrtl:
tmin = float(raw_input('Please enter the lower bound: '))
tmax = float(raw_input('Please enter the upper bound: '))
if with_plot:
if verbose:
print('******\nGenerating plot\n******\n')
plt.plot(data_frame['Tau'], data_frame['SF'], 'k.', ms=0.8)
plt.errorbar(data_frame['Tau'], data_frame['SF'], yerr=data_frame['+-'],
fmt='k.', ms=0.8, capsize=2, elinewidth=0.5)
plt.axvline(x=tmin)
plt.axvline(x=tmax)
plt.xscale('log')
plt.yscale('log')
plt.show()
if verbose:
cont = raw_input('Are you happy with the bounds?[Y|N]: ')
else:
cont = 'y'
if (cont == 'y') or (cont == 'Y'):
loopcrtl = False
elif (cont == 'n') or (cont == 'N'):
loopcrtl = True
else:
print(
"Invalid input, use 'Y' or 'y' for yes or 'N' or 'n' for no, restarting")
if verbose:
print('******\nSet parameters are:\ntmin: %s\ntmax: %s\n******\n' %
(tmin, tmax))
return tmin, tmax
def get_region(current_table, verbose):
'''
Defines the fitting region
'''
clear()
if verbose:
print('#####################################')
print('##### Get Fitting Region ######')
print('#####################################\n')
with_plot = True
else:
with_plot = False
if verbose:
print('Getting bounds for fitting region...')
tmin, tmax = get_bounds(current_table, with_plot, verbose)
if verbose:
print('Bounds set, getting subset of table based on bounds...')
fitting_region = subset(current_table, tmin, tmax, verbose)
if verbose:
print('Subset retrieved, returning to main menu...')
enter_cont()
return fitting_region
def uniquefy(data_table):
'''
Returns unique values in columns
'''
x_col = data_table['Tau'].tolist()
x_col = np.unique(x_col)
y_col = []
e_col = []
for x_val in x_col:
dataframe = data_table[data_table['Tau'] == x_val]
dfy = dataframe['SF'].tolist()
dfe = dataframe['+-'].tolist()
y_col.append(np.unique(dfy)[0])
e_col.append(np.unique(dfe)[0])
udf = pd.DataFrame({'Tau': x_col,
'SF': y_col,
'+-': e_col})
return udf
def polish(data_table, verbose):
'''
Polishes raw structure function data which comes in table format
with multiple data points per actual point
'''
clear()
if verbose:
print('#####################################')
print('####### Polish SF Data ########')
print('#####################################\n')
try:
polished = uniquefy(data_table)
except ValueError:
if verbose:
print('Something went wrong... Is there a data table loaded?')
print('Returning to main menu...')
enter_cont()
clear()
else:
if verbose:
print('Data table polished...')
print('Returning to main menu...')
enter_cont()
clear()
return polished
def power_law(x_dat, params):
'''
Power law distribution:\n
f(x) = amp*(x^ind)\n
In log log:\n
log(f(x)) = log(amp) + ind*log(x)
'''
amp = params[0]
ind = params[1]
y_dat = amp * (x_dat**ind)
return y_dat
def linear_model(x_dat, params):
'''
Standard line in the form:\n
f(x) = a + b * x\n
For use with power law in log log
'''
y_dat = params[0] + params[1] * x_dat
return y_dat
def mse(x_dat, y_dat, model, model_params):
'''
Calculates and returns mean squared error
'''
residuals = y_dat - model(x_dat, model_params)
mse_dat = (residuals**2).sum() / (len(y_dat) - len(model_params))
return mse_dat
def fit_leastsq(params, x_dat, y_dat, model, err=None):
'''
Fit data with specified model using least squares method
'''
def errfunc(model_params, model_x, model_y, err=None):
'''
Residuals of model
'''
if err is None:
result = model_y - model(model_x, model_params)
else:
result = (model_y - model(model_x, model_params)) / err
return result
if err is None:
pfit, pcov, infodict, errmsg, success = sp.optimize.leastsq(
errfunc, params, args=(x_dat, y_dat), full_output=1, epsfcn=0.0001)
else:
pfit, pcov, infodict, errmsg, success = sp.optimize.leastsq(
errfunc, params, args=(x_dat, y_dat, err), full_output=1, epsfcn=0.0001)
if (len(y_dat) > len(params)) and pcov is not None:
s_sq = (errfunc(pfit, x_dat, y_dat)**2).sum() / \
(len(y_dat) - len(params))
pcov = pcov * s_sq
else:
pcov = np.inf
error = []
for index in range(len(pfit)):
try:
error.append(np.absolute(pcov[index][index])**0.5)
except TypeError:
error.append(0.00)
pfit_lsq = pfit
perr_lsq = np.array(error)
return pfit_lsq, perr_lsq
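# Illustrative sketch (not part of the original tool): how power_law, mse and
# fit_leastsq could be combined on synthetic data; the numbers are made up.
def _example_power_law_fit():
    import scipy.optimize  # some SciPy builds only expose sp.optimize once the submodule is imported
    x_dat = np.linspace(1.0, 100.0, 50)
    y_dat = 2.5 * (x_dat ** 0.7)
    params, perrs = fit_leastsq([1.0, 1.0], x_dat, y_dat, power_law)
    print('MSE: ', mse(x_dat, y_dat, power_law, params))
    print('Amplitude: ', params[0], ' +/- ', perrs[0])
    print('Power law index: ', params[1], ' +/- ', perrs[1])
    return params, perrs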
def fit_data(data_table, method):
'''
Fits model to data
'''
if method == 'std_pl':
x_col = np.array(data_table['Tau'].tolist(), dtype=np.float)
y_col = np.array(data_table['SF'].tolist(), dtype=np.float)
e_col = np.array(data_table['+-'].tolist(), dtype=np.float)
params, perrs = fit_leastsq([1, 1], x_col, y_col, power_law, e_col)
print('Standard least squares method:')
print('MSE: ', mse(x_col, y_col, power_law, params))
print('Amplitude: ', params[0], ' +/- ', perrs[0])
print('Power law index: ', params[1], ' +/- ', perrs[1])
plt.plot(x_col, y_col, 'k.', x_col, power_law(x_col, params), 'r-')
plt.xscale('log')
plt.yscale('log')
plt.show()
elif method == 'bkn_pl':
print('WIP: Broken power law')
def get_fit_method(data_table, verbose):
'''
User set fitting method
'''
clear()
if verbose:
print('#####################################')
print('########## Fit Data ###########')
print('#####################################\n')
print('Available fitting methods:\n')
print('1) Single power law')
print('2) Broken power law')
loopctrl = True
while loopctrl:
fit_method = int(
raw_input('\nWhich fitting method would you like to use: '))
print(' ')
if (fit_method < 1) or (fit_method > 2):
print('Invalid entry, available options are 1 or 2.\n')
elif fit_method == 1:
method = 'std_pl'
loopctrl = False
fit_data(data_table, method)
elif fit_method == 2:
method = 'bkn_pl'
loopctrl = False
fit_data(data_table, method)
print(' ')
enter_cont()
clear()
# Main Process
def __main__():
'''
Main function
'''
loopcrtl = True
verbose = set_verbose(True)
while loopcrtl:
print(MAIN_MENU)
try:
selection = int(
raw_input('Please enter your selection from above: '))
print(' ')
except ValueError:
print('Invalid entry, please enter a selection from the menu.')
enter_cont()
else:
if (selection < 0) or (selection > 7):
print('Invalid entry, please enter a selection from the menu.')
elif selection == 1:
current_table = load_data(verbose)
elif selection == 2:
try:
current_table
except NameError:
print('No data table has been loaded')
print('Please load data file using selection 1 from main menu')
enter_cont()
else:
try:
fitting_table
except NameError:
view_table(current_table, verbose)
else:
print('Both current and fitting tables loaded.')
print('Which table would you like to view?')
print("1) Current table\n2) Fitting Table")
try:
vtab = int(raw_input("Table to load: "))
except ValueError:
print('Invalid entry, returning to main menu...')
enter_cont()
else:
if vtab == 1:
view_table(current_table, verbose)
elif vtab == 2:
view_table(fitting_table, verbose)
else:
print('Invalid entry, returning to main menu...')
enter_cont()
elif selection == 3:
try:
current_table
except NameError:
print('No data table has been loaded')
print('Please load data file using selection 1 from main menu')
enter_cont()
else:
current_table = polish(current_table, verbose)
elif selection == 4:
try:
current_table
except NameError:
print('No data table has been loaded')
print('Please load data file using selection 1 from main menu')
enter_cont()
else:
fitting_table = get_region(current_table, verbose)
elif selection == 5:
try:
fitting_table
except NameError:
print('Fitting table not generated...')
print('Please load a data file and select a fitting region')
enter_cont()
else:
get_fit_method(fitting_table, verbose)
elif selection == 7:
verbose = set_verbose(False)
elif selection == 0:
print('Closing program...')
enter_cont()
loopcrtl = False
clear()
# Call to main
__main__()
'''
# Testing
current_table = load_sftable('/home/dblue/Documents/School/Summer2017/PY-SF/astroSF/DATA/TAB/SF',
'mkn335_V.csv', False)
current_table = current_table[current_table['Tau'] < 300]
xi = np.array(current_table['Tau'].tolist(), dtype=np.float)
yi = np.array(current_table['SF'].tolist(), dtype=np.float)
ei = np.array(current_table['+-'].tolist(), dtype=np.float)
cuts = []
mses = []
for cut in range(180, 30, -1):
t = subset(current_table, cut, 200, False)
tx = np.array(t['Tau'].tolist(), dtype=np.float)
ty = np.array(t['SF'].tolist(), dtype=np.float)
b = subset(current_table, 2, cut, False)
bx = np.array(b['Tau'].tolist(), dtype=np.float)
by = np.array(b['SF'].tolist(), dtype=np.float)
be = np.array(b['+-'].tolist(), dtype=np.float)
bparams, bperrs = fit_leastsq([1, 1], bx, by, power_law, be)
bm = mse(bx, by, power_law, bparams)
tm = mse(tx, ty, linear_model, [10**-0.2, 0.0])
cuts.append(cut)
mses.append(bm + tm)
#print('MSE of %s: %s' %(cut, mse(x, y, power_law, params)))
#plt.plot(x, y, 'k-', x, power_law(x, params), 'r-')
#plt.xscale('log')
#plt.yscale('log')
#plt.show()
pot_cut = np.min(mses)
cut_ind = mses.index(pot_cut)
cutoff = cuts[cut_ind]
fig = plt.figure(figsize=(10, 30))
ax1 = fig.add_subplot(211)
ax1.plot(xi, yi, 'k-', cuts, mses, 'r-')
ax1.errorbar(xi, yi, yerr=ei, fmt='k+')
ax1.axhline(y=10**-0.2, color='red')
ax1.axvline(x=cutoff)
ax1.axvline(x=200)
ax1.set_xscale('log')
ax1.set_yscale('log')
ax2 = fig.add_subplot(212)
ax2.plot(cuts, mses, 'r-')
ax2.axvline(x=cutoff)
ax2.set_xscale('log')
plt.show()
print(cutoff)
tfit = subset(current_table, 2, 91, False)
tfx = np.array(tfit['Tau'].tolist(), dtype=np.float)
tfy = np.array(tfit['SF'].tolist(), dtype=np.float)
tfe = np.array(tfit['+-'].tolist(), dtype=np.float)
params, perrs = fit_leastsq([1, 1], tfx, tfy, power_law, tfe)
plt.plot(tfx, tfy, 'k.', tfx, power_law(tfx, params), 'r-')
plt.xscale('log')
plt.yscale('log')
plt.show()
print('MSE: ', mse(tfx, tfy, power_law, params))
print('Amplitude: ', params[0], ' +/- ', perrs[0])
print('Power law index: ', params[1], ' +/- ', perrs[1])
'''
|
mit
|
janverschelde/PHCpack
|
src/Python/PHCpy2/examples/showpaths2.py
|
1
|
1803
|
def main():
"""
Excerpt from the user manual of phcpy.
"""
p = ['x^2 + y - 3;', 'x + 0.125*y^2 - 1.5;']
print 'constructing a total degree start system ...'
from phcpy.solver import total_degree_start_system as tds
q, qsols = tds(p)
print 'number of start solutions :', len(qsols)
from phcpy.trackers import initialize_standard_tracker
from phcpy.trackers import initialize_standard_solution
from phcpy.trackers import next_standard_solution
initialize_standard_tracker(p, q, False)
from phcpy.solutions import strsol2dict
import matplotlib.pyplot as plt
plt.ion()
fig = plt.figure()
for k in range(len(qsols)):
if(k == 0):
axs = fig.add_subplot(221)
elif(k == 1):
axs = fig.add_subplot(222)
elif(k == 2):
axs = fig.add_subplot(223)
elif(k == 3):
axs = fig.add_subplot(224)
startsol = qsols[k]
initialize_standard_solution(len(p),startsol)
dictsol = strsol2dict(startsol)
xpoints = [dictsol['x']]
ypoints = [dictsol['y']]
for k in range(300):
ns = next_standard_solution()
dictsol = strsol2dict(ns)
xpoints.append(dictsol['x'])
ypoints.append(dictsol['y'])
tval = dictsol['t'].real
# tval = eval(dictsol['t'].lstrip().split(' ')[0])
if(tval == 1.0):
break
print(ns)
xre = [point.real for point in xpoints]
yre = [point.real for point in ypoints]
axs.set_xlim(min(xre)-0.3, max(xre)+0.3)
axs.set_ylim(min(yre)-0.3, max(yre)+0.3)
dots, = axs.plot(xre,yre,'r-')
fig.canvas.draw()
fig.canvas.draw()
ans = raw_input('hit return to exit')
main()
|
gpl-3.0
|
timsnyder/bokeh
|
bokeh/core/properties.py
|
1
|
9722
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide property types for Bokeh models
Properties are objects that can be assigned as class attributes on Bokeh
models, to provide automatic serialization, validation, and documentation.
This documentation is broken down into the following sections:
.. contents::
:local:
Overview
--------
There are many property types defined in the module, for example ``Int`` to
represent integral values, ``Seq`` to represent sequences (e.g. lists or
tuples, etc.). Properties can also be combined: ``Seq(Float)`` represents
a sequence of floating point values.
For example, the following defines a model that has integer, string, and
list[float] properties:
.. code-block:: python
class SomeModel(Model):
foo = Int
bar = String(default="something")
baz = List(Float, help="docs for baz prop")
As seen, properties can be declared as just the property type, e.g.
``foo = Int``, in which case the properties are automatically instantiated
on new Model objects. Or the property can be instantiated on the class,
and configured with default values and help strings.
The properties of this class can be initialized by specifying keyword
arguments to the initializer:
.. code-block:: python
m = SomeModel(foo=10, bar="a str", baz=[1,2,3,4])
But also by setting the attributes on an instance:
.. code-block:: python
m.foo = 20
Attempts to set a property to a value of the wrong type will
result in a ``ValueError`` exception:
.. code-block:: python
>>> m.foo = 2.3
Traceback (most recent call last):
<< traceback omitted >>
ValueError: expected a value of type Integral, got 2.3 of type float
Models with properties know how to serialize themselves, to be understood
by BokehJS. Additionally, any help strings provided on properties can be
easily and automatically extracted with the Sphinx extensions in the
:ref:`bokeh.sphinxext` module.
Basic Properties
----------------
.. autoclass:: Angle
.. autoclass:: Any
.. autoclass:: AnyRef
.. autoclass:: Auto
.. autoclass:: Bool
.. autoclass:: Byte
.. autoclass:: Color
.. autoclass:: Complex
.. autoclass:: DashPattern
.. autoclass:: Date
.. autoclass:: Datetime
.. autoclass:: Either
.. autoclass:: Enum
.. autoclass:: Float
.. autoclass:: FontSize
.. autoclass:: Image
.. autoclass:: Instance
.. autoclass:: Int
.. autoclass:: Interval
.. autoclass:: JSON
.. autoclass:: MarkerType
.. autoclass:: MinMaxBounds
.. autoclass:: Percent
.. autoclass:: RGB
.. autoclass:: Regex
.. autoclass:: Size
.. autoclass:: String
.. autoclass:: Struct
.. autoclass:: TimeDelta
Container Properties
--------------------
.. autoclass:: Array
.. autoclass:: ColumnData
.. autoclass:: Dict
.. autoclass:: List
.. autoclass:: RelativeDelta
.. autoclass:: Seq
.. autoclass:: Tuple
DataSpec Properties
-------------------
.. autoclass:: AngleSpec
.. autoclass:: ColorSpec
.. autoclass:: DataDistanceSpec
.. autoclass:: DataSpec
.. autoclass:: DistanceSpec
.. autoclass:: FontSizeSpec
.. autoclass:: MarkerSpec
.. autoclass:: NumberSpec
.. autoclass:: ScreenDistanceSpec
.. autoclass:: StringSpec
.. autoclass:: UnitsSpec
Helpers
~~~~~~~
.. autofunction:: expr
.. autofunction:: field
.. autofunction:: value
Special Properties
------------------
.. autoclass:: Include
.. autoclass:: Override
Validation-only Properties
--------------------------
.. autoclass:: PandasDataFrame
.. autoclass:: PandasGroupBy
Validation Control
------------------
By default, Bokeh properties perform type validation on values. This helps to
ensure the consistency of any data exchanged between Python and JavaScript, as
well as provide detailed and immediate feedback to users if they attempt to
set values of the wrong type. However, these type checks incur some overhead.
In some cases it may be desirable to turn off validation in specific places,
or even entirely, in order to boost performance. The following API is available
to control when type validation occurs.
.. autoclass:: validate
.. autofunction:: without_property_validation
'''
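# Illustrative usage sketch for the validation controls documented above (an
# assumption about typical usage, not part of the original module):
#
#     from bokeh.core.properties import validate, without_property_validation
#
#     with validate(False):            # temporarily skip property type validation
#         some_model.foo = 10
#
#     @without_property_validation     # skip validation inside a hot update callback
#     def update(attr, old, new):
#         some_model.foo = new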
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Angle',
'AngleSpec',
'Any',
'AnyRef',
'Array',
'Auto',
'Bool',
'Byte',
'Color',
'ColorHex',
'ColorSpec',
'ColumnData',
'Complex',
'DashPattern',
'DataDistanceSpec',
'DataSpec',
'Date',
'Datetime',
'Dict',
'DistanceSpec',
'Either',
'Enum',
'Float',
'FontSize',
'FontSizeSpec',
'HatchPatternSpec',
'HatchPatternType',
'Image',
'Include',
'Instance',
'Int',
'Interval',
'JSON',
'List',
'MarkerSpec',
'MarkerType',
'MinMaxBounds',
'NumberSpec',
'Override',
'PandasDataFrame',
'PandasGroupBy',
'Percent',
'RGB',
'Regex',
'RelativeDelta',
'ScreenDistanceSpec',
'Seq',
'Size',
'String',
'StringSpec',
'Struct',
'TimeDelta',
'Tuple',
'UnitsSpec',
'expr',
'field',
'validate',
'value',
'without_property_validation'
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
from .property.any import Any; Any
from .property.any import AnyRef; AnyRef
from .property.auto import Auto; Auto
from .property.color import Color; Color
from .property.color import RGB; RGB
from .property.color import ColorHex; ColorHex
from .property.container import Array; Array
from .property.container import ColumnData; ColumnData
from .property.container import Dict; Dict
from .property.container import List; List
from .property.container import Seq; Seq
from .property.container import Tuple; Tuple
from .property.container import RelativeDelta; RelativeDelta
from .property.dataspec import AngleSpec; AngleSpec
from .property.dataspec import ColorSpec; ColorSpec
from .property.dataspec import DataSpec; DataSpec
from .property.dataspec import DataDistanceSpec; DataDistanceSpec
from .property.dataspec import DistanceSpec; DistanceSpec
from .property.dataspec import expr; expr
from .property.dataspec import field; field
from .property.dataspec import FontSizeSpec; FontSizeSpec
from .property.dataspec import HatchPatternSpec; HatchPatternSpec
from .property.dataspec import MarkerSpec; MarkerSpec
from .property.dataspec import NumberSpec; NumberSpec
from .property.dataspec import ScreenDistanceSpec; ScreenDistanceSpec
from .property.dataspec import StringSpec; StringSpec
from .property.dataspec import UnitsSpec; UnitsSpec
from .property.dataspec import value; value
from .property.datetime import Date; Date
from .property.datetime import Datetime; Datetime
from .property.datetime import TimeDelta; TimeDelta
from .property.either import Either; Either
from .property.enum import Enum; Enum
from .property.include import Include ; Include
from .property.instance import Instance; Instance
from .property.json import JSON; JSON
from .property.numeric import Angle; Angle
from .property.numeric import Byte; Byte
from .property.numeric import Interval; Interval
from .property.numeric import NonNegativeInt; NonNegativeInt
from .property.numeric import Percent; Percent
from .property.numeric import Size; Size
from .property.override import Override ; Override
from .property.pandas import PandasDataFrame ; PandasDataFrame
from .property.pandas import PandasGroupBy ; PandasGroupBy
from .property.primitive import Bool; Bool
from .property.primitive import Complex; Complex
from .property.primitive import Int; Int
from .property.primitive import Float; Float
from .property.primitive import String; String
from .property.regex import Regex; Regex
from .property.struct import Struct; Struct
from .property.visual import DashPattern; DashPattern
from .property.visual import FontSize; FontSize
from .property.visual import HatchPatternType; HatchPatternType
from .property.visual import Image; Image
from .property.visual import MinMaxBounds; MinMaxBounds
from .property.visual import MarkerType; MarkerType
from .property.validation import validate; validate
from .property.validation import without_property_validation; without_property_validation
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
bsd-3-clause
|
nowls/gnuradio
|
gr-fec/python/fec/polar/channel_construction_awgn.py
|
24
|
8560
|
#!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Based on 2 papers:
[1] Ido Tal, Alexander Vardy: 'How To Construct Polar Codes', 2013
for an in-depth description of a widely used algorithm for channel construction.
[2] Harish Vangala, Emanuele Viterbo, Yi Hong: 'A Comparative Study of Polar Code Constructions for the AWGN Channel', 2015
for an overview of different approaches
'''
from scipy.optimize import fsolve
from scipy.special import erfc
from helper_functions import *
from channel_construction_bec import bhattacharyya_bounds
def solver_equation(val, s):
cw_lambda = codeword_lambda_callable(s)
ic_lambda = instantanious_capacity_callable()
return lambda y: ic_lambda(cw_lambda(y)) - val
def solve_capacity(a, s):
eq = solver_equation(a, s)
res = fsolve(eq, 1)
return np.abs(res[0]) # only positive values needed.
def codeword_lambda_callable(s):
return lambda y: np.exp(-2 * y * np.sqrt(2 * s))
def codeword_lambda(y, s):
return codeword_lambda_callable(s)(y)
def instantanious_capacity_callable():
return lambda x : 1 - np.log2(1 + x) + (x * np.log2(x) / (1 + x))
def instantanious_capacity(x):
return instantanious_capacity_callable()(x)
def q_function(x):
    # Q(x) = (1 / sqrt(2 * pi)) * integral from x to inf of exp(-t ^ 2 / 2) dt
return .5 * erfc(x / np.sqrt(2))
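# Quick sanity check (illustrative): Q(0) = 0.5 and Q(x) falls off rapidly,
# e.g. q_function(6.0) is roughly 1e-9.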
def discretize_awgn(mu, design_snr):
'''
    Needed for Binary-AWGN channels.
    Described in [1], Section VI, and in [2] as a function of the same name.
    In both cases the infinite output alphabet is reduced to a finite output alphabet for the given channel.
    Idea:
    1. Compute the instantaneous capacity C(x) on the interval [0, 1].
    2. Split it into mu intervals.
    3. Find the corresponding output alphabet values y of the likelihood ratio function lambda(y) inserted into C(x).
    4. Calculate the probability for each value given that a '0' or '1' was transmitted.
'''
s = 10 ** (design_snr / 10)
a = np.zeros(mu + 1, dtype=float)
a[-1] = np.inf
for i in range(1, mu):
a[i] = solve_capacity(1. * i / mu, s)
factor = np.sqrt(2 * s)
tpm = np.zeros((2, mu))
for j in range(mu):
tpm[0][j] = q_function(factor + a[j]) - q_function(factor + a[j + 1])
tpm[1][j] = q_function(-1. * factor + a[j]) - q_function(-1. * factor + a[j + 1])
tpm = tpm[::-1]
tpm[0] = tpm[0][::-1]
tpm[1] = tpm[1][::-1]
return tpm
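# Illustrative sketch (not part of the original module): inspect the
# discretized channel for an assumed design SNR of 0 dB and mu = 16 bins.
def _example_discretize_awgn(mu=16, design_snr=0.0):
    tpm = discretize_awgn(mu, design_snr)
    # rows hold the output probabilities given a transmitted 0 and 1; taken
    # together they sum to 1 (tal_vardy_tpm_algorithm later scales by 2).
    print(np.sum(tpm))
    return tpm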
def instant_capacity_delta_callable():
return lambda a, b: -1. * (a + b) * np.log2((a + b) / 2) + a * np.log2(a) + b * np.log2(b)
def capacity_delta_callable():
c = instant_capacity_delta_callable()
return lambda a, b, at, bt: c(a, b) + c(at, bt) - c(a + at, b + bt)
def quantize_to_size(tpm, mu):
# This is a degrading merge, compare [1]
calculate_delta_I = capacity_delta_callable()
L = np.shape(tpm)[1]
if not mu < L:
print('WARNING: This channel gets too small!')
# lambda works on vectors just fine. Use Numpy vector awesomeness.
delta_i_vec = calculate_delta_I(tpm[0, 0:-1], tpm[1, 0:-1], tpm[0, 1:], tpm[1, 1:])
for i in range(L - mu):
d = np.argmin(delta_i_vec)
ap = tpm[0, d] + tpm[0, d + 1]
bp = tpm[1, d] + tpm[1, d + 1]
if d > 0:
delta_i_vec[d - 1] = calculate_delta_I(tpm[0, d - 1], tpm[1, d - 1], ap, bp)
if d < delta_i_vec.size - 1:
delta_i_vec[d + 1] = calculate_delta_I(ap, bp, tpm[0, d + 1], tpm[1, d + 1])
delta_i_vec = np.delete(delta_i_vec, d)
tpm = np.delete(tpm, d, axis=1)
tpm[0, d] = ap
tpm[1, d] = bp
return tpm
def upper_bound_z_params(z, block_size, design_snr):
upper_bound = bhattacharyya_bounds(design_snr, block_size)
z = np.minimum(z, upper_bound)
return z
def tal_vardy_tpm_algorithm(block_size, design_snr, mu):
mu = mu // 2 # make sure algorithm uses only as many bins as specified.
block_power = power_of_2_int(block_size)
channels = np.zeros((block_size, 2, mu))
channels[0] = discretize_awgn(mu, design_snr) * 2
print('Constructing polar code with Tal-Vardy algorithm')
    print('(block_size = {0}, design SNR = {1}, mu = {2})'.format(block_size, design_snr, 2 * mu))
show_progress_bar(0, block_size)
for j in range(0, block_power):
u = 2 ** j
for t in range(u):
show_progress_bar(u + t, block_size)
# print("(u={0}, t={1}) = {2}".format(u, t, u + t))
ch1 = upper_convolve(channels[t], mu)
ch2 = lower_convolve(channels[t], mu)
channels[t] = quantize_to_size(ch1, mu)
channels[u + t] = quantize_to_size(ch2, mu)
z = np.zeros(block_size)
for i in range(block_size):
z[i] = bhattacharyya_parameter(channels[i])
z = z[bit_reverse_vector(np.arange(block_size), block_power)]
z = upper_bound_z_params(z, block_size, design_snr)
show_progress_bar(block_size, block_size)
print('')
print('channel construction DONE')
return z
def merge_lr_based(q, mu):
lrs = q[0] / q[1]
vals, indices, inv_indices = np.unique(lrs, return_index=True, return_inverse=True)
# compare [1] (20). Ordering of representatives according to LRs.
temp = np.zeros((2, len(indices)), dtype=float)
if vals.size < mu:
return q
for i in range(len(indices)):
merge_pos = np.where(inv_indices == i)[0]
sum_items = q[:, merge_pos]
if merge_pos.size > 1:
sum_items = np.sum(q[:, merge_pos], axis=1)
temp[0, i] = sum_items[0]
temp[1, i] = sum_items[1]
return temp
def upper_convolve(tpm, mu):
q = np.zeros((2, mu ** 2))
idx = -1
for i in range(mu):
idx += 1
q[0, idx] = (tpm[0, i] ** 2 + tpm[1, i] ** 2) / 2
q[1, idx] = tpm[0, i] * tpm[1, i]
for j in range(i + 1, mu):
idx += 1
q[0, idx] = tpm[0, i] * tpm[0, j] + tpm[1, i] * tpm[1, j]
q[1, idx] = tpm[0, i] * tpm[1, j] + tpm[1, i] * tpm[0, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
q = merge_lr_based(q, mu)
q = normalize_q(q, tpm)
return q
def lower_convolve(tpm, mu):
q = np.zeros((2, mu * (mu + 1)))
idx = -1
for i in range(0, mu):
idx += 1
q[0, idx] = (tpm[0, i] ** 2) / 2
q[1, idx] = (tpm[1, i] ** 2) / 2
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q[0, idx] = tpm[0, i] * tpm[1, i]
q[1, idx] = q[0, idx]
for j in range(i + 1, mu):
idx += 1
q[0, idx] = tpm[0, i] * tpm[0, j]
q[1, idx] = tpm[1, i] * tpm[1, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q[0, idx] = tpm[0, i] * tpm[1, j]
q[1, idx] = tpm[1, i] * tpm[0, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
q = merge_lr_based(q, mu)
q = normalize_q(q, tpm)
return q
def swap_values(first, second):
return second, first
def normalize_q(q, tpm):
original_factor = np.sum(tpm)
next_factor = np.sum(q)
factor = original_factor / next_factor
return q * factor
def main():
    print('channel construction AWGN main')
n = 8
m = 2 ** n
design_snr = 0.0
mu = 16
z_params = tal_vardy_tpm_algorithm(m, design_snr, mu)
print(z_params)
if 0:
import matplotlib.pyplot as plt
plt.plot(z_params)
plt.show()
if __name__ == '__main__':
main()
|
gpl-3.0
|
DEAP/deap
|
examples/es/cma_mo.py
|
1
|
5997
|
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import numpy
from deap import algorithms
from deap import base
from deap import benchmarks
from deap.benchmarks.tools import hypervolume
from deap import cma
from deap import creator
from deap import tools
# Problem size
N = 5
# ZDT1, ZDT2, DTLZ2
MIN_BOUND = numpy.zeros(N)
MAX_BOUND = numpy.ones(N)
EPS_BOUND = 2.e-5
# Kursawe
# MIN_BOUND = numpy.zeros(N) - 5
# MAX_BOUND = numpy.zeros(N) + 5
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)
def distance(feasible_ind, original_ind):
"""A distance function to the feasibility region."""
return sum((f - o)**2 for f, o in zip(feasible_ind, original_ind))
def closest_feasible(individual):
"""A function returning a valid individual from an invalid one."""
feasible_ind = numpy.array(individual)
feasible_ind = numpy.maximum(MIN_BOUND, feasible_ind)
feasible_ind = numpy.minimum(MAX_BOUND, feasible_ind)
return feasible_ind
def valid(individual):
"""Determines if the individual is valid or not."""
if any(individual < MIN_BOUND) or any(individual > MAX_BOUND):
return False
return True
def close_valid(individual):
"""Determines if the individual is close to valid."""
if any(individual < MIN_BOUND-EPS_BOUND) or any(individual > MAX_BOUND+EPS_BOUND):
return False
return True
toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.zdt1)
toolbox.decorate("evaluate", tools.ClosestValidPenalty(valid, closest_feasible, 1.0e+6, distance))
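# Illustrative note (my reading of DEAP's ClosestValidPenalty, stated as an
# assumption rather than fact): for an out-of-bounds candidate the decorated
# evaluate returns the fitness of its closest feasible projection plus a
# penalty proportional to 1.0e+6 * distance(...), steering the search back
# toward [MIN_BOUND, MAX_BOUND].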
def main():
# The cma module uses the numpy random number generator
# numpy.random.seed(128)
MU, LAMBDA = 10, 10
NGEN = 500
verbose = True
create_plot = False
# The MO-CMA-ES algorithm takes a full population as argument
population = [creator.Individual(x) for x in (numpy.random.uniform(0, 1, (MU, N)))]
for ind in population:
ind.fitness.values = toolbox.evaluate(ind)
strategy = cma.StrategyMultiObjective(population, sigma=1.0, mu=MU, lambda_=LAMBDA)
toolbox.register("generate", strategy.generate, creator.Individual)
toolbox.register("update", strategy.update)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("min", numpy.min, axis=0)
stats.register("max", numpy.max, axis=0)
logbook = tools.Logbook()
logbook.header = ["gen", "nevals"] + (stats.fields if stats else [])
fitness_history = []
for gen in range(NGEN):
# Generate a new population
population = toolbox.generate()
# Evaluate the individuals
fitnesses = toolbox.map(toolbox.evaluate, population)
for ind, fit in zip(population, fitnesses):
ind.fitness.values = fit
fitness_history.append(fit)
# Update the strategy with the evaluated individuals
toolbox.update(population)
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=gen, nevals=len(population), **record)
if verbose:
print(logbook.stream)
if verbose:
print("Final population hypervolume is %f" % hypervolume(strategy.parents, [11.0, 11.0]))
# Note that we use a penalty to guide the search to feasible solutions,
# but there is no guarantee that individuals are valid.
# We expect the best individuals will be within bounds or very close.
num_valid = 0
for ind in strategy.parents:
dist = distance(closest_feasible(ind), ind)
if numpy.isclose(dist, 0.0, rtol=1.e-5, atol=1.e-5):
num_valid += 1
print("Number of valid individuals is %d/%d" % (num_valid, len(strategy.parents)))
print("Final population:")
print(numpy.asarray(strategy.parents))
if create_plot:
interactive = 0
if not interactive:
import matplotlib as mpl_tmp
mpl_tmp.use('Agg') # Force matplotlib to not use any Xwindows backend.
import matplotlib.pyplot as plt
fig = plt.figure()
plt.title("Multi-objective minimization via MO-CMA-ES")
plt.xlabel("First objective (function) to minimize")
plt.ylabel("Second objective (function) to minimize")
# Limit the scale because our history values include the penalty.
plt.xlim((-0.1, 1.20))
plt.ylim((-0.1, 1.20))
# Plot all history. Note the values include the penalty.
fitness_history = numpy.asarray(fitness_history)
plt.scatter(fitness_history[:,0], fitness_history[:,1],
facecolors='none', edgecolors="lightblue")
valid_front = numpy.array([ind.fitness.values for ind in strategy.parents if close_valid(ind)])
invalid_front = numpy.array([ind.fitness.values for ind in strategy.parents if not close_valid(ind)])
if len(valid_front) > 0:
plt.scatter(valid_front[:,0], valid_front[:,1], c="g")
if len(invalid_front) > 0:
plt.scatter(invalid_front[:,0], invalid_front[:,1], c="r")
if interactive:
plt.show()
else:
print("Writing cma_mo.png")
plt.savefig("cma_mo.png")
return strategy.parents
if __name__ == "__main__":
solutions = main()
|
lgpl-3.0
|
pjryan126/solid-start-careers
|
store/api/zillow/venv/lib/python2.7/site-packages/pandas/stats/tests/test_ols.py
|
1
|
35506
|
"""
Unit test suite for OLS and PanelOLS classes
"""
# pylint: disable-msg=W0212
# flake8: noqa
from __future__ import division
from datetime import datetime
from pandas import compat
from distutils.version import LooseVersion
import nose
import numpy as np
from numpy.testing.decorators import slow
from pandas import date_range, bdate_range
from pandas.core.panel import Panel
from pandas import DataFrame, Index, Series, notnull, datetools
from pandas.stats.api import ols
from pandas.stats.ols import _filter_data
from pandas.stats.plm import NonPooledPanelOLS, PanelOLS
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assertRaisesRegexp)
import pandas.util.testing as tm
import pandas.compat as compat
from .common import BaseTest
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm
except ImportError:
_have_statsmodels = False
def _check_repr(obj):
repr(obj)
str(obj)
def _compare_ols_results(model1, model2):
tm.assertIsInstance(model1, type(model2))
if hasattr(model1, '_window_type'):
_compare_moving_ols(model1, model2)
else:
_compare_fullsample_ols(model1, model2)
def _compare_fullsample_ols(model1, model2):
assert_series_equal(model1.beta, model2.beta)
def _compare_moving_ols(model1, model2):
assert_frame_equal(model1.beta, model2.beta)
class TestOLS(BaseTest):
_multiprocess_can_split_ = True
# TODO: Add tests for OLS y predict
# TODO: Right now we just check for consistency between full-sample and
# rolling/expanding results of the panel OLS. We should also cross-check
# with trusted implementations of panel OLS (e.g. R).
# TODO: Add tests for non pooled OLS.
@classmethod
def setUpClass(cls):
super(TestOLS, cls).setUpClass()
try:
import matplotlib as mpl
mpl.use('Agg', warn=False)
except ImportError:
pass
if not _have_statsmodels:
raise nose.SkipTest("no statsmodels")
def testOLSWithDatasets_ccard(self):
self.checkDataSet(sm.datasets.ccard.load(), skip_moving=True)
self.checkDataSet(sm.datasets.cpunish.load(), skip_moving=True)
self.checkDataSet(sm.datasets.longley.load(), skip_moving=True)
self.checkDataSet(sm.datasets.stackloss.load(), skip_moving=True)
@slow
def testOLSWithDatasets_copper(self):
self.checkDataSet(sm.datasets.copper.load())
@slow
def testOLSWithDatasets_scotland(self):
self.checkDataSet(sm.datasets.scotland.load())
# degenerate case fails on some platforms
# self.checkDataSet(datasets.ccard.load(), 39, 49) # one col in X all
# 0s
def testWLS(self):
# WLS centered SS changed (fixed) in 0.5.0
sm_version = sm.version.version
if sm_version < LooseVersion('0.5.0'):
raise nose.SkipTest("WLS centered SS not fixed in statsmodels"
" version {0}".format(sm_version))
X = DataFrame(np.random.randn(30, 4), columns=['A', 'B', 'C', 'D'])
Y = Series(np.random.randn(30))
weights = X.std(1)
self._check_wls(X, Y, weights)
weights.ix[[5, 15]] = np.nan
Y[[2, 21]] = np.nan
self._check_wls(X, Y, weights)
def _check_wls(self, x, y, weights):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=y, x=x, weights=1 / weights)
combined = x.copy()
combined['__y__'] = y
combined['__weights__'] = weights
combined = combined.dropna()
endog = combined.pop('__y__').values
aweights = combined.pop('__weights__').values
exog = sm.add_constant(combined.values, prepend=False)
sm_result = sm.WLS(endog, exog, weights=1 / aweights).fit()
assert_almost_equal(sm_result.params, result._beta_raw)
assert_almost_equal(sm_result.resid, result._resid_raw)
self.checkMovingOLS('rolling', x, y, weights=weights)
self.checkMovingOLS('expanding', x, y, weights=weights)
def checkDataSet(self, dataset, start=None, end=None, skip_moving=False):
exog = dataset.exog[start: end]
endog = dataset.endog[start: end]
x = DataFrame(exog, index=np.arange(exog.shape[0]),
columns=np.arange(exog.shape[1]))
y = Series(endog, index=np.arange(len(endog)))
self.checkOLS(exog, endog, x, y)
if not skip_moving:
self.checkMovingOLS('rolling', x, y)
self.checkMovingOLS('rolling', x, y, nw_lags=0)
self.checkMovingOLS('expanding', x, y, nw_lags=0)
self.checkMovingOLS('rolling', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1, nw_overlap=True)
def checkOLS(self, exog, endog, x, y):
reference = sm.OLS(endog, sm.add_constant(exog, prepend=False)).fit()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=y, x=x)
# check that sparse version is the same
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
sparse_result = ols(y=y.to_sparse(), x=x.to_sparse())
_compare_ols_results(result, sparse_result)
assert_almost_equal(reference.params, result._beta_raw)
assert_almost_equal(reference.df_model, result._df_model_raw)
assert_almost_equal(reference.df_resid, result._df_resid_raw)
assert_almost_equal(reference.fvalue, result._f_stat_raw[0])
assert_almost_equal(reference.pvalues, result._p_value_raw)
assert_almost_equal(reference.rsquared, result._r2_raw)
assert_almost_equal(reference.rsquared_adj, result._r2_adj_raw)
assert_almost_equal(reference.resid, result._resid_raw)
assert_almost_equal(reference.bse, result._std_err_raw)
assert_almost_equal(reference.tvalues, result._t_stat_raw)
assert_almost_equal(reference.cov_params(), result._var_beta_raw)
assert_almost_equal(reference.fittedvalues, result._y_fitted_raw)
_check_non_raw_results(result)
def checkMovingOLS(self, window_type, x, y, weights=None, **kwds):
window = np.linalg.matrix_rank(x.values) * 2
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
moving = ols(y=y, x=x, weights=weights, window_type=window_type,
window=window, **kwds)
# check that sparse version is the same
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
sparse_moving = ols(y=y.to_sparse(), x=x.to_sparse(),
weights=weights,
window_type=window_type,
window=window, **kwds)
_compare_ols_results(moving, sparse_moving)
index = moving._index
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in compat.iteritems(x):
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
static = ols(y=y_iter, x=x_iter, weights=weights, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat', 'p_value',
'r2', 'r2_adj', 'rmse', 'std_err', 't_stat',
'var_beta']
def compare(self, static, moving, event_index=None,
result_index=None):
index = moving._index
# Check resid if we have a time index specified
if event_index is not None:
ref = static._resid_raw[-1]
label = index[event_index]
res = moving.resid[label]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[-1]
res = moving.y_fitted[label]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_ols_object_dtype(self):
df = DataFrame(np.random.randn(20, 2), dtype=object)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=df[0], x=df[1])
summary = repr(model)
class TestOLSMisc(tm.TestCase):
_multiprocess_can_split_ = True
'''
For test coverage with faux data
'''
@classmethod
def setUpClass(cls):
super(TestOLSMisc, cls).setUpClass()
if not _have_statsmodels:
raise nose.SkipTest("no statsmodels")
def test_f_test(self):
x = tm.makeTimeDataFrame()
y = x.pop('A')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=y, x=x)
hyp = '1*B+1*C+1*D=0'
result = model.f_test(hyp)
hyp = ['1*B=0',
'1*C=0',
'1*D=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
self.assertRaises(Exception, model.f_test, '1*A=0')
def test_r2_no_intercept(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
x_with = x.copy()
x_with['intercept'] = 1.
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model1 = ols(y=y, x=x)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model2 = ols(y=y, x=x_with, intercept=False)
assert_series_equal(model1.beta, model2.beta)
# TODO: can we infer whether the intercept is there...
self.assertNotEqual(model1.r2, model2.r2)
# rolling
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model1 = ols(y=y, x=x, window=20)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model2 = ols(y=y, x=x_with, window=20, intercept=False)
assert_frame_equal(model1.beta, model2.beta)
self.assertTrue((model1.r2 != model2.r2).all())
def test_summary_many_terms(self):
x = DataFrame(np.random.randn(100, 20))
y = np.random.randn(100)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=y, x=x)
model.summary
def test_y_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model1 = ols(y=y, x=x)
assert_series_equal(model1.y_predict, model1.y_fitted)
assert_almost_equal(model1._y_predict_raw, model1._y_fitted_raw)
def test_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model1 = ols(y=y, x=x)
assert_series_equal(model1.predict(), model1.y_predict)
assert_series_equal(model1.predict(x=x), model1.y_predict)
assert_series_equal(model1.predict(beta=model1.beta), model1.y_predict)
exog = x.copy()
exog['intercept'] = 1.
rs = Series(np.dot(exog.values, model1.beta.values), x.index)
assert_series_equal(model1.y_predict, rs)
x2 = x.reindex(columns=x.columns[::-1])
assert_series_equal(model1.predict(x=x2), model1.y_predict)
x3 = x2 + 10
pred3 = model1.predict(x=x3)
x3['intercept'] = 1.
x3 = x3.reindex(columns=model1.beta.index)
expected = Series(np.dot(x3.values, model1.beta.values), x3.index)
assert_series_equal(expected, pred3)
beta = Series(0., model1.beta.index)
pred4 = model1.predict(beta=beta)
assert_series_equal(Series(0., pred4.index), pred4)
def test_predict_longer_exog(self):
exogenous = {"1998": "4760", "1999": "5904", "2000": "4504",
"2001": "9808", "2002": "4241", "2003": "4086",
"2004": "4687", "2005": "7686", "2006": "3740",
"2007": "3075", "2008": "3753", "2009": "4679",
"2010": "5468", "2011": "7154", "2012": "4292",
"2013": "4283", "2014": "4595", "2015": "9194",
"2016": "4221", "2017": "4520"}
endogenous = {"1998": "691", "1999": "1580", "2000": "80",
"2001": "1450", "2002": "555", "2003": "956",
"2004": "877", "2005": "614", "2006": "468",
"2007": "191"}
endog = Series(endogenous)
exog = Series(exogenous)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=endog, x=exog)
pred = model.y_predict
self.assertTrue(pred.index.equals(exog.index))
def test_longpanel_series_combo(self):
wp = tm.makePanel()
lp = wp.to_frame()
y = lp.pop('ItemA')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=y, x=lp, entity_effects=True, window=20)
self.assertTrue(notnull(model.beta.values).all())
tm.assertIsInstance(model, PanelOLS)
model.summary
def test_series_rhs(self):
y = tm.makeTimeSeries()
x = tm.makeTimeSeries()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=y, x=x)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
expected = ols(y=y, x={'x': x})
assert_series_equal(model.beta, expected.beta)
# GH 5233/5250
assert_series_equal(model.y_predict, model.predict(x=x))
def test_various_attributes(self):
# just make sure everything "works"; correctness is tested elsewhere
x = DataFrame(np.random.randn(100, 5))
y = np.random.randn(100)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=y, x=x, window=20)
series_attrs = ['rank', 'df', 'forecast_mean', 'forecast_vol']
for attr in series_attrs:
value = getattr(model, attr)
tm.assertIsInstance(value, Series)
# works
model._results
def test_catch_regressor_overlap(self):
df1 = tm.makeTimeDataFrame().ix[:, ['A', 'B']]
df2 = tm.makeTimeDataFrame().ix[:, ['B', 'C', 'D']]
y = tm.makeTimeSeries()
data = {'foo': df1, 'bar': df2}
def f():
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
ols(y=y, x=data)
self.assertRaises(Exception, f)
def test_plm_ctor(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=y, x=x, intercept=False)
model.summary
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=y, x=Panel(x))
model.summary
def test_plm_attrs(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rmodel = ols(y=y, x=x, window=10)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=y, x=x)
model.resid
rmodel.resid
def test_plm_lagged_y_predict(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=y, x=x, window=10)
result = model.lagged_y_predict(2)
def test_plm_f_test(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=y, x=x)
hyp = '1*a+1*b=0'
result = model.f_test(hyp)
hyp = ['1*a=0',
'1*b=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
def test_plm_exclude_dummy_corner(self):
y = tm.makeTimeDataFrame()
x = {'a': tm.makeTimeDataFrame(),
'b': tm.makeTimeDataFrame()}
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(
y=y, x=x, entity_effects=True, dropped_dummies={'entity': 'D'})
model.summary
def f():
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
ols(y=y, x=x, entity_effects=True,
dropped_dummies={'entity': 'E'})
self.assertRaises(Exception, f)
def test_columns_tuples_summary(self):
# #1837
X = DataFrame(np.random.randn(10, 2), columns=[('a', 'b'), ('c', 'd')])
Y = Series(np.random.randn(10))
# it works!
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
model = ols(y=Y, x=X)
model.summary
class TestPanelOLS(BaseTest):
_multiprocess_can_split_ = True
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat',
'p_value', 'r2', 'r2_adj', 'rmse', 'std_err',
't_stat', 'var_beta']
_other_fields = ['resid', 'y_fitted']
def testFiltering(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=self.panel_y2, x=self.panel_x2)
x = result._x
index = x.index.get_level_values(0)
index = Index(sorted(set(index)))
exp_index = Index([datetime(2000, 1, 1), datetime(2000, 1, 3)])
self.assertTrue(exp_index.equals(index))
index = x.index.get_level_values(1)
index = Index(sorted(set(index)))
exp_index = Index(['A', 'B'])
self.assertTrue(exp_index.equals(index))
x = result._x_filtered
index = x.index.get_level_values(0)
index = Index(sorted(set(index)))
exp_index = Index([datetime(2000, 1, 1),
datetime(2000, 1, 3),
datetime(2000, 1, 4)])
self.assertTrue(exp_index.equals(index))
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = [[6, 14, 1],
[9, 17, 1],
[30, 48, 1]]
assert_almost_equal(exp_x, result._x.values)
exp_x_filtered = [[6, 14, 1],
[9, 17, 1],
[30, 48, 1],
[11, 20, 1],
[12, 21, 1]]
assert_almost_equal(exp_x_filtered, result._x_filtered.values)
self.assertTrue(result._x_filtered.index.levels[0].equals(
result.y_fitted.index))
def test_wls_panel(self):
y = tm.makeTimeDataFrame()
x = Panel({'x1': tm.makeTimeDataFrame(),
'x2': tm.makeTimeDataFrame()})
y.ix[[1, 7], 'A'] = np.nan
y.ix[[6, 15], 'B'] = np.nan
y.ix[[3, 20], 'C'] = np.nan
y.ix[[5, 11], 'D'] = np.nan
stack_y = y.stack()
stack_x = DataFrame(dict((k, v.stack())
for k, v in x.iteritems()))
weights = x.std('items')
stack_weights = weights.stack()
stack_y.index = stack_y.index._tuple_index
stack_x.index = stack_x.index._tuple_index
stack_weights.index = stack_weights.index._tuple_index
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=y, x=x, weights=1 / weights)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
expected = ols(y=stack_y, x=stack_x, weights=1 / stack_weights)
assert_almost_equal(result.beta, expected.beta)
for attr in ['resid', 'y_fitted']:
rvals = getattr(result, attr).stack().values
evals = getattr(expected, attr).values
assert_almost_equal(rvals, evals)
def testWithTimeEffects(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=self.panel_y2, x=self.panel_x2, time_effects=True)
assert_almost_equal(result._y_trans.values.flat, [0, -0.5, 0.5])
exp_x = [[0, 0], [-10.5, -15.5], [10.5, 15.5]]
assert_almost_equal(result._x_trans.values, exp_x)
# _check_non_raw_results(result)
def testWithEntityEffects(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True)
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[0., 6., 14., 1.], [0, 9, 17, 1], [1, 30, 48, 1]],
index=result._x.index, columns=['FE_B', 'x1', 'x2',
'intercept'],
dtype=float)
tm.assert_frame_equal(result._x, exp_x.ix[:, result._x.columns])
# _check_non_raw_results(result)
def testWithEntityEffectsAndDroppedDummies(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True,
dropped_dummies={'entity': 'B'})
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[1., 6., 14., 1.], [1, 9, 17, 1], [0, 30, 48, 1]],
index=result._x.index, columns=['FE_A', 'x1', 'x2',
'intercept'],
dtype=float)
tm.assert_frame_equal(result._x, exp_x.ix[:, result._x.columns])
# _check_non_raw_results(result)
def testWithXEffects(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'])
assert_almost_equal(result._y.values.flat, [1, 4, 5])
res = result._x
exp_x = DataFrame([[0., 0., 14., 1.], [0, 1, 17, 1], [1, 0, 48, 1]],
columns=['x1_30', 'x1_9', 'x2', 'intercept'],
index=res.index, dtype=float)
assert_frame_equal(res, exp_x.reindex(columns=res.columns))
def testWithXEffectsAndDroppedDummies(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'],
dropped_dummies={'x1': 30})
res = result._x
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[1., 0., 14., 1.], [0, 1, 17, 1], [0, 0, 48, 1]],
columns=['x1_6', 'x1_9', 'x2', 'intercept'],
index=res.index, dtype=float)
assert_frame_equal(res, exp_x.reindex(columns=res.columns))
def testWithXEffectsAndConversion(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=self.panel_y3, x=self.panel_x3,
x_effects=['x1', 'x2'])
assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])
exp_x = [[0, 0, 0, 1, 1], [1, 0, 0, 0, 1], [0, 1, 1, 0, 1],
[0, 0, 0, 1, 1]]
assert_almost_equal(result._x.values, exp_x)
exp_index = Index(['x1_B', 'x1_C', 'x2_baz', 'x2_foo', 'intercept'])
self.assertTrue(exp_index.equals(result._x.columns))
# _check_non_raw_results(result)
def testWithXEffectsAndConversionAndDroppedDummies(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'],
dropped_dummies={'x2': 'foo'})
assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])
exp_x = [[0, 0, 0, 0, 1], [1, 0, 1, 0, 1], [0, 1, 0, 1, 1],
[0, 0, 0, 0, 1]]
assert_almost_equal(result._x.values, exp_x)
exp_index = Index(['x1_B', 'x1_C', 'x2_bar', 'x2_baz', 'intercept'])
self.assertTrue(exp_index.equals(result._x.columns))
# _check_non_raw_results(result)
def testForSeries(self):
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y)
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y, nw_lags=0)
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y, nw_lags=1,
nw_overlap=True)
def testRolling(self):
self.checkMovingOLS(self.panel_x, self.panel_y)
def testRollingWithFixedEffects(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
entity_effects=True)
self.checkMovingOLS(self.panel_x, self.panel_y, intercept=False,
entity_effects=True)
def testRollingWithTimeEffects(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
time_effects=True)
def testRollingWithNeweyWest(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
nw_lags=1)
def testRollingWithEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
cluster='entity')
def testUnknownClusterRaisesValueError(self):
assertRaisesRegexp(ValueError, "Unrecognized cluster.*ridiculous",
self.checkMovingOLS, self.panel_x, self.panel_y,
cluster='ridiculous')
def testRollingWithTimeEffectsAndEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
time_effects=True, cluster='entity')
def testRollingWithTimeCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
cluster='time')
def testRollingWithNeweyWestAndEntityCluster(self):
self.assertRaises(ValueError, self.checkMovingOLS,
self.panel_x, self.panel_y,
nw_lags=1, cluster='entity')
def testRollingWithNeweyWestAndTimeEffectsAndEntityCluster(self):
self.assertRaises(ValueError,
self.checkMovingOLS, self.panel_x, self.panel_y,
nw_lags=1, cluster='entity',
time_effects=True)
def testExpanding(self):
self.checkMovingOLS(
self.panel_x, self.panel_y, window_type='expanding')
def testNonPooled(self):
self.checkNonPooled(y=self.panel_y, x=self.panel_x)
self.checkNonPooled(y=self.panel_y, x=self.panel_x,
window_type='rolling', window=25, min_periods=10)
def testUnknownWindowType(self):
assertRaisesRegexp(ValueError, "window.*ridiculous",
self.checkNonPooled, y=self.panel_y, x=self.panel_x,
window_type='ridiculous', window=25, min_periods=10)
def checkNonPooled(self, x, y, **kwds):
# For now, just check that it doesn't crash
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=y, x=x, pool=False, **kwds)
_check_repr(result)
for attr in NonPooledPanelOLS.ATTRIBUTES:
_check_repr(getattr(result, attr))
def checkMovingOLS(self, x, y, window_type='rolling', **kwds):
window = 25 # must be larger than rank of x
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
moving = ols(y=y, x=x, window_type=window_type,
window=window, **kwds)
index = moving._index
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in compat.iteritems(x):
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
static = ols(y=y_iter, x=x_iter, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
def checkForSeries(self, x, y, series_x, series_y, **kwds):
# Consistency check with simple OLS.
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = ols(y=y, x=x, **kwds)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
reference = ols(y=series_y, x=series_x, **kwds)
self.compare(reference, result)
def compare(self, static, moving, event_index=None,
result_index=None):
# Check resid if we have a time index specified
if event_index is not None:
staticSlice = _period_slice(static, -1)
movingSlice = _period_slice(moving, event_index)
ref = static._resid_raw[staticSlice]
res = moving._resid_raw[movingSlice]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[staticSlice]
res = moving._y_fitted_raw[movingSlice]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_auto_rolling_window_type(self):
data = tm.makeTimeDataFrame()
y = data.pop('A')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
window_model = ols(y=y, x=data, window=20, min_periods=10)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rolling_model = ols(y=y, x=data, window=20, min_periods=10,
window_type='rolling')
assert_frame_equal(window_model.beta, rolling_model.beta)
def test_group_agg(self):
from pandas.stats.plm import _group_agg
values = np.ones((10, 2)) * np.arange(10).reshape((10, 1))
bounds = np.arange(5) * 2
f = lambda x: x.mean(axis=0)
agged = _group_agg(values, bounds, f)
assert(agged[1][0] == 2.5)
assert(agged[2][0] == 4.5)
# test a function that doesn't aggregate
f2 = lambda x: np.zeros((2, 2))
self.assertRaises(Exception, _group_agg, values, bounds, f2)
def _check_non_raw_results(model):
_check_repr(model)
_check_repr(model.resid)
_check_repr(model.summary_as_matrix)
_check_repr(model.y_fitted)
_check_repr(model.y_predict)
def _period_slice(panelModel, i):
index = panelModel._x_trans.index
period = index.levels[0][i]
L, R = index.get_major_bounds(period, period)
return slice(L, R)
class TestOLSFilter(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
date_index = date_range(datetime(2009, 12, 11), periods=3,
freq=datetools.bday)
ts = Series([3, 1, 4], index=date_index)
self.TS1 = ts
date_index = date_range(datetime(2009, 12, 11), periods=5,
freq=datetools.bday)
ts = Series([1, 5, 9, 2, 6], index=date_index)
self.TS2 = ts
date_index = date_range(datetime(2009, 12, 11), periods=3,
freq=datetools.bday)
ts = Series([5, np.nan, 3], index=date_index)
self.TS3 = ts
date_index = date_range(datetime(2009, 12, 11), periods=5,
freq=datetools.bday)
ts = Series([np.nan, 5, 8, 9, 7], index=date_index)
self.TS4 = ts
data = {'x1': self.TS2, 'x2': self.TS4}
self.DF1 = DataFrame(data=data)
data = {'x1': self.TS2, 'x2': self.TS4}
self.DICT1 = data
def testFilterWithSeriesRHS(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS1, {'x1': self.TS2}, None)
self.tsAssertEqual(self.TS1, lhs)
self.tsAssertEqual(self.TS2[:3], rhs['x1'])
self.tsAssertEqual(self.TS2, rhs_pre['x1'])
def testFilterWithSeriesRHS2(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS2, {'x1': self.TS1}, None)
self.tsAssertEqual(self.TS2[:3], lhs)
self.tsAssertEqual(self.TS1, rhs['x1'])
self.tsAssertEqual(self.TS1, rhs_pre['x1'])
def testFilterWithSeriesRHS3(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS3, {'x1': self.TS4}, None)
exp_lhs = self.TS3[2:3]
exp_rhs = self.TS4[2:3]
exp_rhs_pre = self.TS4[1:]
self.tsAssertEqual(exp_lhs, lhs)
self.tsAssertEqual(exp_rhs, rhs['x1'])
self.tsAssertEqual(exp_rhs_pre, rhs_pre['x1'])
def testFilterWithDataFrameRHS(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS1, self.DF1, None)
exp_lhs = self.TS1[1:]
exp_rhs1 = self.TS2[1:3]
exp_rhs2 = self.TS4[1:3]
self.tsAssertEqual(exp_lhs, lhs)
self.tsAssertEqual(exp_rhs1, rhs['x1'])
self.tsAssertEqual(exp_rhs2, rhs['x2'])
def testFilterWithDictRHS(self):
(lhs, rhs, weights, rhs_pre,
index, valid) = _filter_data(self.TS1, self.DICT1, None)
exp_lhs = self.TS1[1:]
exp_rhs1 = self.TS2[1:3]
exp_rhs2 = self.TS4[1:3]
self.tsAssertEqual(exp_lhs, lhs)
self.tsAssertEqual(exp_rhs1, rhs['x1'])
self.tsAssertEqual(exp_rhs2, rhs['x2'])
def tsAssertEqual(self, ts1, ts2):
self.assert_numpy_array_equal(ts1, ts2)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
gpl-2.0
|
NelisVerhoef/scikit-learn
|
sklearn/__init__.py
|
154
|
3014
|
"""
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
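# Hedged usage note (editor addition): the seed can be pinned from the shell when
# running the test suite, e.g. something like ``SKLEARN_SEED=42 nosetests sklearn``,
# so that numpy.random and random produce reproducible draws across runs.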
|
bsd-3-clause
|
aminert/scikit-learn
|
sklearn/mixture/gmm.py
|
128
|
31069
|
"""
Gaussian Mixture Models.
This implementation corresponds to the frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
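# Editor note (worked form): for a diagonal covariance the density factorizes over
# features, so log N(x | m, diag(c)) = -0.5 * sum_j [log(2*pi*c_j) + (x_j - m_j)**2 / c_j],
# which is what _log_multivariate_normal_density_diag below computes in vectorized
# form for all samples and all components at once.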
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
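# Editor note: for 'tied'/'full' covariances the code above uses the
# eigendecomposition covar = U diag(s) U^T; with z ~ N(0, I), x = U diag(sqrt(s)) z
# has covariance exactly covar, so mean + x is a draw from N(mean, covar).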
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
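# Editor note: the responsibilities returned above are the usual posterior weights
#   gamma_k(x) = w_k * N(x | mu_k, Sigma_k) / sum_j w_j * N(x | mu_j, Sigma_j),
# evaluated in log space with logsumexp to avoid underflow of the small densities.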
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
the prediction may not be 100% accurate when only a few iterations are performed
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
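# Editor note, small worked example: with n_components=2, n_features=3 and
# covariance_type='full' this gives 2 * 3 * 4 / 2 = 12 covariance parameters,
# 2 * 3 = 6 mean parameters and 2 - 1 = 1 free mixing weight, i.e. 19 in total.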
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations; we need to reinitialize this component
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
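# Editor note: the diagonal M step above is the responsibility-weighted moment
# identity Sigma_k = E_k[x**2] - 2 * mu_k * E_k[x] + mu_k**2 (+ min_covar), where
# E_k[.] is the average weighted by the k-th column of the responsibilities.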
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
|
bsd-3-clause
|
bonus85/csv-analytics
|
simple.py
|
1
|
1557
|
# -*- coding: utf-8 -*-
import process_data
from matplotlib import pyplot as plt
file_name = 'test.csv' # Make sure to use the right data set
analyzer = process_data.PowerConsumptionAnalyzer(file_name)
# analyzer.write_max('test_worksheet')
# analyzer.close_wb()
month = analyzer.get_month(2013, 2)
sorted_month = month.sort_values(ascending=False)
print month.shape # the shape of month (number of days, number of hours)
print month[1] # day 1, every hour (24 h), as a list
#print month[1, 5] # day 1, hour 5
#plt.plot(range(1,25), month[1], label='1')
#plt.plot(range(1,25), month[2], label='2')
#plt.plot(range(1,25), month[3], label='3')
print sorted_month[:15]
target_value = sorted_month[0]*0.8 # 80% of max
print 'Target value:', target_value
antall=0
for hour in sorted_month:
if hour < target_value: break
print hour
antall = antall+1
print 'Count:', antall
neighbor_hours = analyzer.get_neighbor_hours(sorted_month[:antall], return_single=True)
print neighbor_hours
print "Target value: ", target_value
for nh, top in zip(neighbor_hours, sorted_month[:antall]):
good_top=False
diff_top = top-target_value
diff_0 = target_value-nh[0]
if diff_0 < 0:
diff_0 = 0
diff_1 = target_value-nh[1]
if diff_1 < 0:
diff_1 = 0
if diff_top < diff_0 + diff_1:
good_top = True
print nh[0], top, nh[1], good_top
plt.plot(process_data.date_index(month), month, 's-')
#plt.legend()
#plt.suptitle('January 2014') # figure title
#plt.xlabel('Date')
plt.ylabel('kWh')
plt.show()
|
gpl-3.0
|
johnmgregoire/NanoCalorimetry
|
accalinitcode_Sn10fast.py
|
1
|
2815
|
import numpy, h5py, pylab
from PnSC_h5io import *
from matplotlib.ticker import FuncFormatter
def myexpformat(x, pos):
for ndigs in range(5):
lab=(('%.'+'%d' %ndigs+'e') %x).replace('e+0','e').replace('e+','e').replace('e0','').replace('e-0','e-')
if eval(lab)==x:
return lab
return lab
ExpTickLabels=FuncFormatter(myexpformat)
p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/20110714_SnACcal.h5'
#f=h5py.File(p,mode='r')
seg=3
exp='Sn_10kHz_4e4Ks'
skip=50
skipe=20
f, hpp=experimenthppaths(p, exp)
daqHz=f[hpp[0]].attrs['daqHz']
f.close()
hpp=['/Calorimetry/Sn_10kHz_4e4Ks/measurement/HeatProgram/cell29_57.75dc56.1ac_10kHz_12.6ms_1_of_1', '/Calorimetry/Sn_10kHz_4e4Ks/measurement/HeatProgram/cell7_57.75dc56.1ac_10kHz_12.6ms_1_of_1']
labs=['10kHz, 10Ohm Res','fast ramp']
targetf=1.e4
#labs=[hp.rpartition('/')[2] for hp in hpp]
nplots=4
pylab.figure(figsize=(20, 8))
for i, (hp, title) in enumerate(zip(hpp, labs)):
hpsdl=CreateHeatProgSegDictList(p, exp, hp.rpartition('/')[2])
sampv=hpsdl[seg]['samplevoltage'][0][skip:-1*skipe]
diffv=hpsdl[seg]['samplehighpassacvoltage'][0][skip:-1*skipe]
t=hpsdl[seg]['cycletime'][0][skip:-1*skipe]
pylab.subplot(len(hpp), nplots, nplots*i+1)
sy=sampv
pylab.plot((t*1000.)[:4000], sy[:4000], 'g.', markersize=1)
pylab.gca().yaxis.set_major_formatter(ExpTickLabels)
#pylab.ylim(-620, 620)
pylab.title(title)
pylab.ylabel('sample channel V')
pylab.subplot(len(hpp), nplots, nplots*i+2)
y=diffv
pylab.plot((t*1000.)[:4000], y[:4000], 'r.', markersize=1)
pylab.gca().yaxis.set_major_formatter(ExpTickLabels)
#pylab.ylim(-620, 620)
pylab.title(title)
pylab.ylabel('filtered channel, V')
pylab.subplot(len(hpp), nplots, nplots*i+3)
fft=numpy.fft.fft(y)
freq=numpy.fft.fftfreq(len(y))*daqHz
pylab.loglog(freq[:len(freq)//2], numpy.abs(fft[:len(freq)//2]))
pylab.ylabel('filtered channel fft mag.')
pylab.subplot(len(hpp), nplots, nplots*i+4)
pylab.loglog(freq[:len(freq)//2], numpy.abs(fft[:len(freq)//2]))
pylab.xlim(.9*targetf, 4*targetf)
pylab.xticks([targetf, 2.*targetf, 3.*targetf])
pylab.ylabel('filtered channel fft mag.')
pylab.subplot(len(hpp), nplots, nplots*i+1)
pylab.xlabel('time (ms)')
pylab.subplot(len(hpp), nplots, nplots*i+2)
pylab.xlabel('time (ms)')
pylab.subplot(len(hpp), nplots, nplots*i+3)
pylab.xlabel('freq (Hz)')
pylab.subplot(len(hpp), nplots, nplots*i+4)
pylab.xlabel('freq (Hz)')
pylab.suptitle('response for 10mAdc+9mAac into 10$\Omega$')
pylab.subplots_adjust(left=.07, right=.97, wspace=.35, hspace=.25)
if True:
pylab.savefig(os.path.join('C:/Users/JohnnyG/Documents/HarvardWork/ACcal/20110714_Sn_analysis', '_'.join(('FFT', exp)))+'.png')
pylab.show()
|
bsd-3-clause
|
mhue/scikit-learn
|
examples/covariance/plot_sparse_cov.py
|
300
|
5078
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
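# ----------------------------------------------------------------------------
# Editor's note (not part of the original example): a small numeric check of
# the docstring's claim that the l1 estimator learns a sparse precision but
# detects too many non-zero coefficients. All names are defined above.
n_true_nonzero = np.sum(np.abs(prec) > 1e-10)
n_estimated_nonzero = np.sum(np.abs(prec_) > 1e-10)
print("alpha selected by GraphLassoCV: %.4f" % model.alpha_)
print("non-zeros in true precision: %d, in l1-estimated precision: %d"
      % (n_true_nonzero, n_estimated_nonzero))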
|
bsd-3-clause
|
jashwanth9/Expert-recommendation-system
|
code/contentBoostedCF.py
|
1
|
4576
|
# Simplified implementation of collaborative filtering
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import cosine_similarity
from scipy.spatial import distance
import pdb
import warnings
from scipy import sparse
import cPickle as pickle
from sklearn.naive_bayes import MultinomialNB
import collections
def loadTrainTestData():
trainData = []
with open('../train_data/invited_info_train.txt', 'r') as f1:
for line in f1:
line = line.rstrip('\n')
sp = line.split()
trainData.append((sp[0], sp[1], int(sp[2])))
testData = []
with open('../train_data/test_nolabel.txt', 'r') as f1:
line = f1.readline()
for line in f1:
testData.append(line.rstrip('\r\n').split(','))
return trainData, testData
def loadData():
print "loading data"
useritem_sparse = pickle.load(open('../features/useritemmatrix_normalized.dat', 'rb'))
valData = []
question_feats = {}
trainData = []
with open('../train_data/validate_nolabel.txt', 'r') as f1:
header = f1.readline()
for line in f1:
valData.append(line.rstrip('\r\n').split(','))
ques_keys = pickle.load(open('../train_data/question_info_keys.dat', 'rb'))
user_keys = pickle.load(open('../train_data/user_info_keys.dat', 'rb'))
user_keys_map = {}
ques_keys_map = {}
for i in range(len(user_keys)):
user_keys_map[user_keys[i]] = i
for i in range(len(ques_keys)):
ques_keys_map[ques_keys[i]] = i
# tf = pickle.load(open('../features/ques_charid_tfidf.dat', 'rb'))
# tfx = tf.toarray()
# for i in range(len(tfx)):
# question_feats[ques_keys[i]] = tfx[0].tolist()
topics = []
with open('../train_data/question_info.txt', 'r') as f1:
for line in f1:
topic = int(line.split()[1])
topics.append(topic)
for i in range(len(ques_keys)):
question_feats[ques_keys[i]] = [1 if x == topics[i] else 0 for x in range(22)]
with open('../train_data/invited_info_train.txt', 'r') as f1:
for line in f1:
line = line.rstrip('\n')
sp = line.split()
trainData.append((sp[0], sp[1], int(sp[2])))
return useritem_sparse, valData, ques_keys, user_keys, trainData, question_feats, ques_keys_map, user_keys_map
def getModels(trainData, question_feats):
print "getting models"
userX = {}
userY = {}
for qid, uid, val in trainData:
if uid not in userX:
userX[uid] = []
userY[uid] = []
userX[uid].append(question_feats[qid])
userY[uid].append(val)
nbmodels = {}
for user in userX:
nbmodels[user] = MultinomialNB()
nbmodels[user].fit(userX[user], userY[user])
return nbmodels
def contentBoosting(user_keys, ques_keys, useritem, usermodels, question_feats):
print "boosting"
useritem = useritem.toarray()
topredict = [question_feats[ques_keys[i]] for i in range(len(ques_keys))]
for i in range(0, len(user_keys)):
if user_keys[i] not in usermodels:
continue
predictions = usermodels[user_keys[i]].predict(topredict)
for j in range(0, len(ques_keys)):
if useritem[i][j] == 0:
prediction = predictions[j]
if prediction == 1:
useritem[i][j] = 1
elif prediction == 0:
useritem[i][j] = -0.125
else:
print prediction
return useritem
def collabFilteringPredictions(useritem, sparse, k, valData, ques_keys_map, user_keys_map):
print "getting predictions"
#input: useritem matrix
	#sparse: whether useritem is sparse or not
#k : k nearest neighbors to consider
#returns list of predictions
similarities = cosine_similarity(useritem)
scores = []
print similarities.shape
useritemfull = useritem
for qid, uid in valData:
score = 0
for nbindex in similarities[user_keys_map[uid]].argsort()[(-k-1):]:
if nbindex == user_keys_map[uid]: #exclude self
continue
score += useritemfull[nbindex][ques_keys_map[qid]]*similarities[user_keys_map[uid]][nbindex]
scores.append(score)
predictions = []
#normalization
maxscore = max(scores)
minscore = min(scores)
for score in scores:
predictions.append((score-minscore)/float(maxscore-minscore))
return predictions
k = 180
useritem_sparse, valData, ques_keys, user_keys, trainData, question_feats, ques_keys_map, user_keys_map = loadData()
usermodels = getModels(trainData, question_feats)
useritem = contentBoosting(user_keys, ques_keys, useritem_sparse, usermodels, question_feats)
predictions = collabFilteringPredictions(useritem, False, k, valData, ques_keys_map, user_keys_map)
with open('../validation/content_boosted_'+str(k)+'.csv', 'w') as f1:
f1.write('qid,uid,label\n')
for i in range(0, len(predictions)):
f1.write(valData[i][0]+','+valData[i][1]+','+str(predictions[i])+'\n')
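# ----------------------------------------------------------------------------
# Editor's sketch (not part of the original script): a tiny illustration of the
# min-max normalization used in collabFilteringPredictions, which maps raw
# neighborhood scores onto [0, 1]. The numbers are made up.
demo_scores = [2.0, 5.0, 8.0]
demo_min, demo_max = min(demo_scores), max(demo_scores)
print [(s - demo_min) / float(demo_max - demo_min) for s in demo_scores]
# -> [0.0, 0.5, 1.0]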
|
apache-2.0
|
j08lue/poppy
|
poppy/animators.py
|
1
|
11088
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.patheffects as mpatheffects
from mpl_toolkits.axes_grid1 import make_axes_locatable
import netCDF4
import time
import datetime
import pyutils.dates as pud
import pycpt.modify
import pycpt.load
pycpt.load.register_cptcity_cmaps('http://soliton.vm.bytemark.co.uk/pub/cpt-city/gmt/GMT_ocean.cpt')
def _update_time(ani,fname):
with netCDF4.Dataset(fname) as ds:
timevar = ds.variables['time']
ani.date = pud.pseudo_to_proper_datetime(
netCDF4.num2date(timevar[0],timevar.units,timevar.calendar))
ani.date -= datetime.timedelta(days=1)
newyear = pud.datetime_to_decimal_year(ani.date)
try:
ani.elapsed = newyear - ani.initialyear
except AttributeError:
ani.elapsed = 0.
ani.initialyear = newyear
ani.year = newyear
def _update_timestamp(ani, fname):
_update_time(ani, fname)
text = 'model year: {0.year:04d}-{0.month:02d}\nelapsed: {1:.2f} years'.format(ani.date,ani.elapsed)
try:
ani.timestamp.set_text(text)
except AttributeError:
ani.timestamp = ani.ax.text(0.95,0.05,text,color='k',
ha='right',va='bottom',transform=ani.ax.transAxes,
path_effects=([mpatheffects.withStroke(linewidth=3, foreground='white')]))
def _find_depth_level(fname, depth):
with netCDF4.Dataset(fname) as ds:
return np.argmin(np.abs(ds.variables['z_w'][:]*1e-2 - depth))
def _get_level_depth(fname, k):
with netCDF4.Dataset(fname) as ds:
return ds.variables['z_w'][k]*1e-2
class Layer:
"""Horizontal layer of a given 4D variable"""
def __init__(self,
ax,
ncfiles,
varname,
scale=1.,
levels = None,
cmap = 'RdYlBu_r',
k=0,
depth=None,
t=0,
ii=None, jj=None,
pause = 0,
with_timestamp = True,
):
"""
Create a new horizontal layer animation
Parameters
----------
ax : axis
axis to plot on
ncfiles : list
input files
varname : str
netCDF variable name
scale : float
scale the data by this factor
levels : sequence, optional
data levels for plotting
cmap : str or plt.cm
color map
k : int
vertical level (0-based)
depth : float
depth to find corresponding k for
t : int
time level for each file
ii,jj : ndarrays, optional
indices for subregion
pause : float, optional
pause during each iteration step
"""
self.ax = ax
self.ncfiles = ncfiles
self.varname = varname
self.scale = scale
self.t = t
#depth
self.depth = depth
if depth is not None:
self.k = _find_depth_level(ncfiles[0], depth)
elif k is not None:
self.k = k
else:
raise ValueError('Either k or depth must be specified.')
self.depth_k = _get_level_depth(ncfiles[0], self.k)
# data shape
self.datashape = self._get_datashape()
self.ndim = len(self.datashape)
if self.ndim not in [3,4]:
raise NotImplementedError('Only 4D and 3D variables supported!')
self.ny,self.nx = ny,nx = self.datashape[-2:]
self.ii = np.arange(nx) if ii is None else np.mod(ii,nx)
self.ii_original = ii
self.jj = np.arange(ny) if jj is None else jj
self.fig = self.ax.get_figure()
self.pause = pause
self.with_timestamp = with_timestamp
self._make_axes()
self._update_long_name(ncfiles[0])
self.ax.autoscale(axis='both',tight=True)
# levels
if levels is None:
sample_data = self._get_data(self.ncfiles[-1])
ticker = mticker.MaxNLocator(nbins=21, symmetric=True)
levels = ticker.tick_values(sample_data.min(), sample_data.max())
self.cmap, self.norm = pycpt.modify.generate_cmap_norm(levels=levels, cm=cmap)
def _update_long_name(self,fname):
with netCDF4.Dataset(fname) as ds:
self.long_name = '{0.long_name} ({0.units})'.format(ds.variables[self.varname])
if len(self.datashape) == 4:
self.long_name += ' at {:.0f} m'.format(self.depth_k)
def init(self,cbarpos='right'):
fname = self.ncfiles[0]
self.data = self._get_data(fname)
self.img = self.ax.pcolormesh(
self.xax,self.yax,
self.data,
cmap=self.cmap,norm=self.norm)
divider = make_axes_locatable(self.ax)
cax = divider.append_axes(cbarpos, size="5%", pad=0.05)
self.cb = self.fig.colorbar(self.img, cax=cax, orientation='vertical',
#format='%.1e',
#label = self.long_name,
)
self.cb.ax.yaxis.set_ticks_position(cbarpos)
#self.cb.ax.yaxis.set_label_position(cbarpos)
if self.with_timestamp: _update_timestamp(self,fname)
self.ax.set_xticklabels(['{:.0f}'.format(np.mod(i,self.nx)) for i in self.ax.get_xticks()])
self.ax.set_title(self.long_name)
return self.img
def _get_datashape(self, fname=None):
fname = fname or self.ncfiles[0]
with netCDF4.Dataset(fname) as ds:
self.datashape = ds.variables[self.varname].shape
return self.datashape
def _get_data(self, fname):
with netCDF4.Dataset(fname) as ds:
if self.ndim == 4:
return ds.variables[self.varname][self.t,self.k,self.jj,self.ii] * self.scale
elif self.ndim == 3:
return ds.variables[self.varname][self.t,self.jj,self.ii] * self.scale
def _make_axes(self):
try:
self.xax = np.concatenate((self.ii_original,[self.ii_original[-1]+1]))-0.5
self.yax = self.jj
except TypeError:
self.xax = np.arange(len(self.ii)+1,dtype=float)-0.5
self.yax = np.arange(len(self.jj)+1,dtype=float)-0.5
def __call__(self, i):
fname = self.ncfiles[i]
self.data = self._get_data(fname)
time.sleep(self.pause)
self.img.set_array(self.data.ravel())
if self.with_timestamp: _update_timestamp(self,fname)
return self.img
class VerticalSection:
"""Vertical section of a given variable"""
def __init__(self,
ax,
ncfiles,
varname,
ii,jj,
scale = 1.,
levels = None,
cmap = 'RdYlBu_r',
t = 0,
pause = 0,
with_timestamp = True,
limit_k = True,
):
"""
Create a new vertical section animation
Parameters
----------
ax : axis
axis to plot on
ncfiles : list
input files
varname : str
netCDF variable name
ii,jj : ndarrays, optional
indices for subregion
scale : float
scale the data by this factor
levels : sequence, optional
data levels for plotting
cmap : str or plt.cm
color map
t : int
time level for each file
pause : float, optional
pause during each iteration step
"""
self.ax = ax
self.t = t
self.ncfiles = ncfiles
self.varname = varname
self.scale = scale
self._update_long_name(ncfiles[0])
self.datashape = self._get_datashape(ncfiles[0])
# indices
#if isinstance(jj, int):
# jj = np.ones(len(ii), int) * jj
#elif isinstance(ii, int):
# ii = np.ones(len(jj), int) * ii
self.jj = jj
self.ii = ii #np.mod(ii, self.datashape[3])
# maxk
self.maxk = self.datashape[-3]
if limit_k:
sample_data = self._get_data(self.ncfiles[-1])
self.maxk = np.max(np.sum(~np.ma.getmaskarray(sample_data),axis=0))
self.fig = self.ax.get_figure()
self.pause = pause
self.with_timestamp = with_timestamp
self._make_axes(ncfiles[0])
self.ax.autoscale(axis='x',tight=True)
self.ax.invert_yaxis()
# levels
if levels is None:
sample_data = self._get_data(self.ncfiles[-1])
ticker = mticker.MaxNLocator(nbins=21, symmetric=True)
levels = ticker.tick_values(sample_data.min(), sample_data.max())
self.cmap, self.norm = pycpt.modify.generate_cmap_norm(levels=levels, cm=cmap)
def _update_long_name(self,fname):
with netCDF4.Dataset(fname) as ds:
self.long_name = '{0.long_name} ({0.units})'.format(ds.variables[self.varname])
def init(self,cbarpos='right'):
fname = self.ncfiles[0]
self.data = self._get_data(fname)
self.img = self.ax.pcolormesh(
self.xax,self.zax,
self.data,
cmap=self.cmap,norm=self.norm)
divider = make_axes_locatable(self.ax)
cax = divider.append_axes(cbarpos, size="5%", pad=0.05)
self.cb = self.fig.colorbar(self.img, cax=cax, orientation='vertical',
label = self.long_name)
if self.with_timestamp: _update_timestamp(self, fname)
return self.img
def _get_datashape(self, fname):
with netCDF4.Dataset(fname) as ds:
return ds.variables[self.varname].shape
def _get_data(self, fname):
with netCDF4.Dataset(fname) as ds:
return ds.variables[self.varname][self.t,:self.maxk,self.jj,self.ii] * self.scale
def _make_axes(self,fname):
with netCDF4.Dataset(fname) as ds:
dsvar = ds.variables
self.zax = np.concatenate([[0],dsvar['z_w_bot'][:self.maxk]*1e-2])
try:
self.xax = np.arange(len(self.ii)+1, dtype=float)-0.5
        except TypeError:  # self.ii or self.jj is an int, not an index array
self.xax = np.arange(len(self.jj)+1, dtype=float)-0.5
def __call__(self,i):
fname = self.ncfiles[i]
self.data = self._get_data(fname)
time.sleep(self.pause)
self.img.set_array(self.data.ravel())
if self.with_timestamp: _update_timestamp(self,fname)
return self.img
def plot_map(self):
self.mapfig = plt.figure()
self.mapax = self.mapfig.add_subplot(111)
        with netCDF4.Dataset(self.ncfiles[0]) as ds:
depth = ds.variables['HU'][:]*1e-2
depth = np.ma.masked_where(depth<=0,depth)
self.mapimg = self.mapax.pcolormesh(depth,cmap='GMT_ocean_r')
ii, jj = self.ii, self.jj
if isinstance(jj, int):
jj = np.ones(len(ii), int) * jj
elif isinstance(ii, int):
ii = np.ones(len(jj), int) * ii
self.mapax.plot(ii,jj,'m',lw=2)
return self.mapfig
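# ----------------------------------------------------------------------------
# Editor's sketch (not part of the original module): how a Layer is typically
# driven by matplotlib's FuncAnimation -- init() draws the first frame and the
# instance itself is the per-frame callback. The file list and variable name
# below are hypothetical; point them at real model output to actually run it.
if __name__ == '__main__':
    from matplotlib.animation import FuncAnimation
    ncfiles = ['output.0001.nc', 'output.0002.nc']  # hypothetical paths
    fig, ax = plt.subplots()
    layer = Layer(ax, ncfiles, 'TEMP', depth=100.)  # 'TEMP' is a guess
    layer.init()
    ani = FuncAnimation(fig, layer, frames=len(ncfiles), repeat=False)
    plt.show()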
|
gpl-2.0
|
murali-munna/scikit-learn
|
sklearn/feature_selection/rfe.py
|
137
|
17066
|
# Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
    The following example shows how to retrieve the 5 informative features
    in the Friedman #1 dataset, without knowing their number a priori.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
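# ----------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a quick check of the note
# in the RFECV docstring that grid_scores_ has ceil((n_features - 1) / step) + 1
# entries.
if __name__ == '__main__':
    from sklearn.datasets import make_friedman1
    from sklearn.svm import SVR
    X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    selector = RFECV(SVR(kernel="linear"), step=1, cv=5).fit(X, y)
    expected = int(np.ceil((X.shape[1] - 1) / 1.0)) + 1
    print("len(grid_scores_) = %d, expected = %d"
          % (len(selector.grid_scores_), expected))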
|
bsd-3-clause
|
xuewei4d/scikit-learn
|
examples/applications/plot_topics_extraction_with_nmf_lda.py
|
17
|
5433
|
"""
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying :class:`~sklearn.decomposition.NMF` and
:class:`~sklearn.decomposition.LatentDirichletAllocation` on a corpus
of documents and extracting additive models of the topic structure of the
corpus. The output is a plot of topics, each represented as a bar plot of
its top few words, based on their weights.
Non-negative Matrix Factorization is applied with two different objective
functions: the Frobenius norm, and the generalized Kullback-Leibler divergence.
The latter is equivalent to Probabilistic Latent Semantic Indexing.
The default parameters (n_samples / n_features / n_components) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from time import time
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_components = 10
n_top_words = 20
def plot_top_words(model, feature_names, n_top_words, title):
fig, axes = plt.subplots(2, 5, figsize=(30, 15), sharex=True)
axes = axes.flatten()
for topic_idx, topic in enumerate(model.components_):
top_features_ind = topic.argsort()[:-n_top_words - 1:-1]
top_features = [feature_names[i] for i in top_features_ind]
weights = topic[top_features_ind]
ax = axes[topic_idx]
ax.barh(top_features, weights, height=0.7)
ax.set_title(f'Topic {topic_idx +1}',
fontdict={'fontsize': 30})
ax.invert_yaxis()
ax.tick_params(axis='both', which='major', labelsize=20)
for i in 'top right left'.split():
ax.spines[i].set_visible(False)
fig.suptitle(title, fontsize=40)
plt.subplots_adjust(top=0.90, bottom=0.05, wspace=0.90, hspace=0.3)
plt.show()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
data, _ = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'),
return_X_y=True)
data_samples = data[:n_samples]
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
print()
# Fit the NMF model
print("Fitting the NMF model (Frobenius norm) with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_components, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
plot_top_words(nmf, tfidf_feature_names, n_top_words,
'Topics in NMF model (Frobenius norm)')
# Fit the NMF model
print('\n' * 2, "Fitting the NMF model (generalized Kullback-Leibler "
"divergence) with tf-idf features, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_components, random_state=1,
beta_loss='kullback-leibler', solver='mu', max_iter=1000, alpha=.1,
l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
plot_top_words(nmf, tfidf_feature_names, n_top_words,
'Topics in NMF model (generalized Kullback-Leibler divergence)')
print('\n' * 2, "Fitting LDA models with tf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_components=n_components, max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
tf_feature_names = tf_vectorizer.get_feature_names()
plot_top_words(lda, tf_feature_names, n_top_words, 'Topics in LDA model')
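# ----------------------------------------------------------------------------
# Editor's note (not part of the original example): lda.components_ holds
# unnormalized topic-word pseudo-counts; normalizing each row gives the
# topic-word probability distributions.
import numpy as np
topic_word = lda.components_ / lda.components_.sum(axis=1)[:, np.newaxis]
print("each LDA topic-word distribution sums to 1:",
      np.allclose(topic_word.sum(axis=1), 1.0))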
|
bsd-3-clause
|
lorenzo-desantis/mne-python
|
examples/stats/plot_linear_regression_raw.py
|
12
|
2275
|
"""
========================================
Regression on continuous data (rER[P/F])
========================================
This demonstrates how rERPs (regressing the continuous data) are a
generalisation of traditional averaging. If all preprocessing steps
are the same, no overlap between epochs exists, and all predictors
are binary, regression is virtually identical to traditional
averaging.
If overlap exists and/or predictors are continuous, traditional averaging
is inapplicable, but regression can still estimate effects, including
those of continuous predictors.
Note: this example is based on new code which may not yet be
memory-optimized. Be careful when running it on a machine with little memory.
rERPs are described in:
Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
waveforms: II. Non-linear effects, overlap correction, and practical
considerations. Psychophysiology, 52(2), 169-189.
"""
# Authors: Jona Sassenhagen <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import spm_face
from mne.stats.regression import linear_regression_raw
# Preprocess data
data_path = spm_face.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw.fif'
raw = mne.io.Raw(raw_fname, preload=True) # Take first run
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 45, method='iir')
events = mne.find_events(raw, stim_channel='UPPT001')
event_id = dict(faces=1, scrambled=2)
tmin, tmax = -.1, .5
raw.pick_types(meg=True)
# regular epoching
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=None,
baseline=None, preload=True, verbose=False, decim=4)
# rERF
evokeds = linear_regression_raw(raw, events=events, event_id=event_id,
reject=None, tmin=tmin, tmax=tmax,
decim=4)
# linear_regression_raw returns a dict of evokeds
# select conditions similarly to mne.Epochs objects
# plot both results
cond = "faces"
fig, (ax1, ax2) = plt.subplots(1, 2)
epochs[cond].average().plot(axes=ax1, show=False)
evokeds[cond].plot(axes=ax2, show=False)
ax1.set_title("Traditional averaging")
ax2.set_title("rERF")
plt.show()
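# ----------------------------------------------------------------------------
# Editor's sketch (not part of the original example): a rough numeric
# comparison of the two estimates. With binary predictors and little overlap
# the rERF should be close to the traditional average; the shape guard is only
# there because the two pipelines above are built independently.
import numpy as np
ave = epochs[cond].average()
if ave.data.shape == evokeds[cond].data.shape:
    print("max |average - rERF|: %g"
          % np.abs(ave.data - evokeds[cond].data).max())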
|
bsd-3-clause
|
ShawnMurd/MetPy
|
examples/plots/upperair_declarative.py
|
5
|
1882
|
# Copyright (c) 2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
===========================================
Upper Air Analysis using Declarative Syntax
===========================================
The MetPy declarative syntax allows for a simplified interface to creating common
meteorological analyses including upper air observation plots.
"""
########################################
from datetime import datetime
import pandas as pd
from metpy.cbook import get_test_data
import metpy.plots as mpplots
from metpy.units import units
########################################
# **Getting the data**
#
# In this example, data is originally from the Iowa State Upper-air archive
# (https://mesonet.agron.iastate.edu/archive/raob/) available through a Siphon method.
# The data are pre-processed to attach latitude/longitude locations for each RAOB site.
data = pd.read_csv(get_test_data('UPA_obs.csv', as_file_obj=False))
########################################
# **Plotting the data**
#
# Use the declarative plotting interface to create a CONUS upper-air map for 500 hPa
# Plotting the Observations
obs = mpplots.PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 14, 0)
obs.level = 500 * units.hPa
obs.fields = ['temperature', 'dewpoint', 'height']
obs.locations = ['NW', 'SW', 'NE']
obs.formats = [None, None, lambda v: format(v, '.0f')[:3]]
obs.vector_field = ('u_wind', 'v_wind')
obs.reduce_points = 0
# Add map features for the particular panel
panel = mpplots.MapPanel()
panel.layout = (1, 1, 1)
panel.area = (-124, -72, 20, 53)
panel.projection = 'lcc'
panel.layers = ['coastline', 'borders', 'states', 'land', 'ocean']
panel.plots = [obs]
# Collecting panels for complete figure
pc = mpplots.PanelContainer()
pc.size = (15, 10)
pc.panels = [panel]
# Showing the results
pc.show()
|
bsd-3-clause
|
thunderhoser/GewitterGefahr
|
gewittergefahr/gg_io/myrorss_and_mrms_io_test.py
|
1
|
8568
|
"""Unit tests for myrorss_and_mrms_io.py."""
import unittest
import numpy
import pandas
from gewittergefahr.gg_io import myrorss_and_mrms_io
from gewittergefahr.gg_utils import radar_utils
TOLERANCE = 1e-6
LL_SHEAR_NAME = radar_utils.LOW_LEVEL_SHEAR_NAME
LL_SHEAR_NAME_MYRORSS = radar_utils.field_name_new_to_orig(
field_name=radar_utils.LOW_LEVEL_SHEAR_NAME,
data_source_name=radar_utils.MYRORSS_SOURCE_ID)
LL_SHEAR_NAME_MRMS = radar_utils.field_name_new_to_orig(
field_name=radar_utils.LOW_LEVEL_SHEAR_NAME,
data_source_name=radar_utils.MRMS_SOURCE_ID)
# The following constants are used to test _get_pathless_raw_file_pattern and
# _get_pathless_raw_file_name.
FILE_TIME_UNIX_SEC = 1507234802
FILE_SPC_DATE_STRING = '20171005'
PATHLESS_ZIPPED_FILE_NAME = '20171005-202002.netcdf.gz'
PATHLESS_UNZIPPED_FILE_NAME = '20171005-202002.netcdf'
PATHLESS_FILE_PATTERN = '20171005-2020*.netcdf*'
# The following constants are used to test _remove_sentinels_from_sparse_grid.
THESE_GRID_ROWS = numpy.linspace(0, 10, num=11, dtype=int)
THESE_GRID_COLUMNS = numpy.linspace(0, 10, num=11, dtype=int)
THESE_NUM_GRID_CELLS = numpy.linspace(0, 10, num=11, dtype=int)
SENTINEL_VALUES = numpy.array([-99000., -99001.])
RADAR_FIELD_WITH_SENTINELS = radar_utils.VIL_NAME
THESE_RADAR_VALUES = numpy.array(
[SENTINEL_VALUES[0], 1., SENTINEL_VALUES[1], 3., SENTINEL_VALUES[0], 5.,
SENTINEL_VALUES[1], 7., 8., 9., 10.])
THIS_DICTIONARY = {myrorss_and_mrms_io.GRID_ROW_COLUMN: THESE_GRID_ROWS,
myrorss_and_mrms_io.GRID_COLUMN_COLUMN: THESE_GRID_COLUMNS,
myrorss_and_mrms_io.NUM_GRID_CELL_COLUMN:
THESE_NUM_GRID_CELLS,
RADAR_FIELD_WITH_SENTINELS: THESE_RADAR_VALUES}
SPARSE_GRID_TABLE_WITH_SENTINELS = pandas.DataFrame.from_dict(THIS_DICTIONARY)
THESE_SENTINEL_INDICES = numpy.array([0, 2, 4, 6], dtype=int)
SPARSE_GRID_TABLE_NO_SENTINELS = SPARSE_GRID_TABLE_WITH_SENTINELS.drop(
SPARSE_GRID_TABLE_WITH_SENTINELS.index[THESE_SENTINEL_INDICES], axis=0,
inplace=False)
# The following constants are used to test _remove_sentinels_from_full_grid.
FIELD_MATRIX_WITH_SENTINELS = numpy.array([
[0, 1, 2],
[3, SENTINEL_VALUES[0], 5],
[SENTINEL_VALUES[1], 7, 8],
[9, 10, SENTINEL_VALUES[1]],
[12, 13, SENTINEL_VALUES[0]]])
FIELD_MATRIX_NO_SENTINELS = numpy.array([
[0, 1, 2],
[3, numpy.nan, 5],
[numpy.nan, 7, 8],
[9, 10, numpy.nan],
[12, 13, numpy.nan]])
# The following constants are used to test get_relative_dir_for_raw_files.
RELATIVE_DIR_NAME_MYRORSS = '{0:s}/00.25'.format(LL_SHEAR_NAME_MYRORSS)
RELATIVE_DIR_NAME_MRMS = '{0:s}/00.25'.format(LL_SHEAR_NAME_MRMS)
# The following constants are used to test find_raw_file and
# find_raw_file_inexact_time.
TOP_RAW_DIRECTORY_NAME = 'radar'
RAW_FILE_NAME_MYRORSS = (
'radar/2017/20171005/{0:s}/00.25/20171005-202002.netcdf.gz'.format(
LL_SHEAR_NAME_MYRORSS))
RAW_FILE_NAME_MRMS = (
'radar/2017/20171005/{0:s}/00.25/20171005-202002.netcdf.gz'.format(
LL_SHEAR_NAME_MRMS))
class MyrorssAndMrmsIoTests(unittest.TestCase):
"""Each method is a unit test for myrorss_and_mrms_io.py."""
def test_get_pathless_raw_file_pattern(self):
"""Ensures correct output from _get_pathless_raw_file_pattern."""
this_pathless_file_pattern = (
myrorss_and_mrms_io._get_pathless_raw_file_pattern(
FILE_TIME_UNIX_SEC))
self.assertTrue(this_pathless_file_pattern == PATHLESS_FILE_PATTERN)
def test_get_pathless_raw_file_name_zipped(self):
"""Ensures correct output from _get_pathless_raw_file_name.
In this case, generating name for zipped file.
"""
this_pathless_file_name = (
myrorss_and_mrms_io._get_pathless_raw_file_name(
FILE_TIME_UNIX_SEC, zipped=True))
self.assertTrue(this_pathless_file_name == PATHLESS_ZIPPED_FILE_NAME)
def test_get_pathless_raw_file_name_unzipped(self):
"""Ensures correct output from _get_pathless_raw_file_name.
In this case, generating name for unzipped file.
"""
this_pathless_file_name = (
myrorss_and_mrms_io._get_pathless_raw_file_name(
FILE_TIME_UNIX_SEC, zipped=False))
self.assertTrue(this_pathless_file_name == PATHLESS_UNZIPPED_FILE_NAME)
def test_raw_file_name_to_time_zipped(self):
"""Ensures correct output from raw_file_name_to_time.
In this case, input is name of zipped file.
"""
this_time_unix_sec = myrorss_and_mrms_io.raw_file_name_to_time(
PATHLESS_ZIPPED_FILE_NAME)
self.assertTrue(this_time_unix_sec == FILE_TIME_UNIX_SEC)
def test_raw_file_name_to_time_unzipped(self):
"""Ensures correct output from raw_file_name_to_time.
In this case, input is name of unzipped file.
"""
this_time_unix_sec = myrorss_and_mrms_io.raw_file_name_to_time(
PATHLESS_UNZIPPED_FILE_NAME)
self.assertTrue(this_time_unix_sec == FILE_TIME_UNIX_SEC)
def test_remove_sentinels_from_sparse_grid(self):
"""Ensures correct output from _remove_sentinels_from_sparse_grid."""
this_sparse_grid_table = (
myrorss_and_mrms_io._remove_sentinels_from_sparse_grid(
SPARSE_GRID_TABLE_WITH_SENTINELS,
field_name=RADAR_FIELD_WITH_SENTINELS,
sentinel_values=SENTINEL_VALUES))
self.assertTrue(
this_sparse_grid_table.equals(SPARSE_GRID_TABLE_NO_SENTINELS))
def test_remove_sentinels_from_full_grid(self):
"""Ensures correct output from _remove_sentinels_from_full_grid."""
this_field_matrix = (
myrorss_and_mrms_io._remove_sentinels_from_full_grid(
FIELD_MATRIX_WITH_SENTINELS, SENTINEL_VALUES))
self.assertTrue(numpy.allclose(
this_field_matrix, FIELD_MATRIX_NO_SENTINELS, atol=TOLERANCE,
equal_nan=True))
def test_get_relative_dir_for_raw_files_myrorss(self):
"""Ensures correct output from get_relative_dir_for_raw_files.
In this case, data source is MYRORSS.
"""
this_relative_dir_name = (
myrorss_and_mrms_io.get_relative_dir_for_raw_files(
field_name=LL_SHEAR_NAME,
data_source=radar_utils.MYRORSS_SOURCE_ID))
self.assertTrue(this_relative_dir_name == RELATIVE_DIR_NAME_MYRORSS)
def test_get_relative_dir_for_raw_files_mrms(self):
"""Ensures correct output from get_relative_dir_for_raw_files.
In this case, data source is MRMS.
"""
this_relative_dir_name = (
myrorss_and_mrms_io.get_relative_dir_for_raw_files(
field_name=LL_SHEAR_NAME,
data_source=radar_utils.MRMS_SOURCE_ID))
self.assertTrue(this_relative_dir_name == RELATIVE_DIR_NAME_MRMS)
def test_find_raw_file_myrorss(self):
"""Ensures correct output from find_raw_file."""
this_raw_file_name = myrorss_and_mrms_io.find_raw_file(
unix_time_sec=FILE_TIME_UNIX_SEC,
spc_date_string=FILE_SPC_DATE_STRING,
field_name=LL_SHEAR_NAME,
data_source=radar_utils.MYRORSS_SOURCE_ID,
top_directory_name=TOP_RAW_DIRECTORY_NAME,
raise_error_if_missing=False)
self.assertTrue(this_raw_file_name == RAW_FILE_NAME_MYRORSS)
def test_find_raw_file_mrms(self):
"""Ensures correct output from find_raw_file."""
this_raw_file_name = myrorss_and_mrms_io.find_raw_file(
unix_time_sec=FILE_TIME_UNIX_SEC,
spc_date_string=FILE_SPC_DATE_STRING,
field_name=LL_SHEAR_NAME,
data_source=radar_utils.MRMS_SOURCE_ID,
top_directory_name=TOP_RAW_DIRECTORY_NAME,
raise_error_if_missing=False)
self.assertTrue(this_raw_file_name == RAW_FILE_NAME_MRMS)
def test_find_raw_file_inexact_time(self):
"""Ensures correct output from find_raw_file_inexact_time."""
this_raw_file_name = myrorss_and_mrms_io.find_raw_file_inexact_time(
desired_time_unix_sec=FILE_TIME_UNIX_SEC,
spc_date_string=FILE_SPC_DATE_STRING,
field_name=LL_SHEAR_NAME,
data_source=radar_utils.MYRORSS_SOURCE_ID,
top_directory_name=TOP_RAW_DIRECTORY_NAME,
raise_error_if_missing=False)
self.assertTrue(this_raw_file_name is None)
if __name__ == '__main__':
unittest.main()
|
mit
|
pchmieli/h2o-3
|
h2o-py/h2o/model/metrics_base.py
|
2
|
22127
|
from h2o.model.confusion_matrix import ConfusionMatrix
import imp
class MetricsBase(object):
"""
A parent class to house common metrics available for the various Metrics types.
The methods here are available across different model categories, and so appear here.
"""
def __init__(self, metric_json,on=None,algo=""):
self._metric_json = metric_json
self._on_train = False # train and valid and xval are not mutually exclusive -- could have a test. train and valid only make sense at model build time.
self._on_valid = False
self._on_xval = False
self._algo = algo
if on=="training_metrics": self._on_train=True
elif on=="validation_metrics": self._on_valid=True
elif on=="cross_validation_metrics": self._on_xval=True
elif on is None: pass
else: raise ValueError("on expected to be train,valid,or xval. Got: " +str(on))
def __repr__(self):
self.show()
return ""
@staticmethod
def _has(dictionary, key):
return key in dictionary and dictionary[key] is not None
def show(self):
"""
Display a short summary of the metrics.
:return: None
"""
metric_type = self._metric_json['__meta']['schema_type']
types_w_glm = ['ModelMetricsRegressionGLM', 'ModelMetricsBinomialGLM']
types_w_clustering = ['ModelMetricsClustering']
types_w_mult = ['ModelMetricsMultinomial']
types_w_bin = ['ModelMetricsBinomial', 'ModelMetricsBinomialGLM']
types_w_r2 = ['ModelMetricsBinomial', 'ModelMetricsRegression'] + types_w_glm + types_w_mult
types_w_mean_residual_deviance = ['ModelMetricsRegressionGLM', 'ModelMetricsRegression']
types_w_logloss = types_w_bin + types_w_mult
types_w_dim = ["ModelMetricsGLRM"]
print
print metric_type + ": " + self._algo
reported_on = "** Reported on {} data. **"
if self._on_train:
print reported_on.format("train")
elif self._on_valid:
print reported_on.format("validation")
elif self._on_xval:
print reported_on.format("cross-validation")
else:
print reported_on.format("test")
print
print "MSE: " + str(self.mse())
if metric_type in types_w_r2:
print "R^2: " + str(self.r2())
if metric_type in types_w_mean_residual_deviance:
print "Mean Residual Deviance: " + str(self.mean_residual_deviance())
if metric_type in types_w_logloss:
print "LogLoss: " + str(self.logloss())
if metric_type in types_w_glm:
print "Null degrees of freedom: " + str(self.null_degrees_of_freedom())
print "Residual degrees of freedom: " + str(self.residual_degrees_of_freedom())
print "Null deviance: " + str(self.null_deviance())
print "Residual deviance: " + str(self.residual_deviance())
print "AIC: " + str(self.aic())
if metric_type in types_w_bin:
print "AUC: " + str(self.auc())
print "Gini: " + str(self.giniCoef())
self.confusion_matrix().show()
self._metric_json["max_criteria_and_metric_scores"].show()
if metric_type in types_w_mult:
self.confusion_matrix().show()
self.hit_ratio_table().show()
if metric_type in types_w_clustering:
print "Total Within Cluster Sum of Square Error: " + str(self.tot_withinss())
print "Total Sum of Square Error to Grand Mean: " + str(self.totss())
print "Between Cluster Sum of Square Error: " + str(self.betweenss())
self._metric_json['centroid_stats'].show()
if metric_type in types_w_dim:
print "Sum of Squared Error (Numeric): " + str(self.num_err())
print "Misclassification Error (Categorical): " + str(self.cat_err())
def r2(self):
"""
:return: Retrieve the R^2 coefficient for this set of metrics
"""
return self._metric_json["r2"]
def logloss(self):
"""
:return: Retrieve the log loss for this set of metrics.
"""
return self._metric_json["logloss"]
def mean_residual_deviance(self):
"""
:return: Retrieve the mean residual deviance for this set of metrics.
"""
return self._metric_json["mean_residual_deviance"]
def auc(self):
"""
:return: Retrieve the AUC for this set of metrics.
"""
return self._metric_json['AUC']
def aic(self):
"""
:return: Retrieve the AIC for this set of metrics.
"""
return self._metric_json['AIC']
def giniCoef(self):
"""
        :return: Retrieve the Gini coefficient for this set of metrics.
"""
return self._metric_json['Gini']
def mse(self):
"""
:return: Retrieve the MSE for this set of metrics
"""
return self._metric_json['MSE']
def residual_deviance(self):
"""
:return: the residual deviance if the model has residual deviance, or None if no residual deviance.
"""
if MetricsBase._has(self._metric_json, "residual_deviance"):
return self._metric_json["residual_deviance"]
return None
def residual_degrees_of_freedom(self):
"""
:return: the residual dof if the model has residual deviance, or None if no residual dof.
"""
if MetricsBase._has(self._metric_json, "residual_degrees_of_freedom"):
return self._metric_json["residual_degrees_of_freedom"]
return None
def null_deviance(self):
"""
:return: the null deviance if the model has residual deviance, or None if no null deviance.
"""
if MetricsBase._has(self._metric_json, "null_deviance"):
return self._metric_json["null_deviance"]
return None
def null_degrees_of_freedom(self):
"""
:return: the null dof if the model has residual deviance, or None if no null dof.
"""
if MetricsBase._has(self._metric_json, "null_degrees_of_freedom"):
return self._metric_json["null_degrees_of_freedom"]
return None
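# A minimal, hypothetical usage sketch of the accessors above (illustration only; it assumes
# `perf` is a metrics object returned by a model's model_performance() call):
#
#   perf.show()                 # summary, dispatched on the metric type
#   print perf.mse()            # available for every metric type
#   print perf.null_deviance()  # None unless the model reports deviances (GLM-style metrics)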
class H2ORegressionModelMetrics(MetricsBase):
"""
This class provides an API for inspecting the metrics returned by a regression model.
It is possible to retrieve the R^2 (1 - MSE/variance) and the MSE.
"""
def __init__(self,metric_json,on=None,algo=""):
super(H2ORegressionModelMetrics, self).__init__(metric_json, on, algo)
class H2OClusteringModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2OClusteringModelMetrics, self).__init__(metric_json, on, algo)
def tot_withinss(self):
"""
:return: the Total Within Cluster Sum-of-Square Error, or None if not present.
"""
if MetricsBase._has(self._metric_json, "tot_withinss"):
return self._metric_json["tot_withinss"]
return None
def totss(self):
"""
:return: the Total Sum-of-Square Error to Grand Mean, or None if not present.
"""
if MetricsBase._has(self._metric_json, "totss"):
return self._metric_json["totss"]
return None
def betweenss(self):
"""
:return: the Between Cluster Sum-of-Square Error, or None if not present.
"""
if MetricsBase._has(self._metric_json, "betweenss"):
return self._metric_json["betweenss"]
return None
class H2OMultinomialModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2OMultinomialModelMetrics, self).__init__(metric_json, on, algo)
def confusion_matrix(self):
"""
Returns a confusion matrix based on H2O's default prediction threshold for a dataset.
"""
return self._metric_json['cm']['table']
def hit_ratio_table(self):
"""
Retrieve the Hit Ratios
"""
return self._metric_json['hit_ratio_table']
class H2OBinomialModelMetrics(MetricsBase):
"""
This class is essentially an API for the AUC object.
This class contains methods for inspecting the AUC for different criteria.
To input the different criteria, use the static variable `criteria`
"""
def __init__(self, metric_json, on=None, algo=""):
"""
Create a new Binomial Metrics object (essentially a wrapper around some json)
:param metric_json: A blob of json holding all of the needed information
:param on_train: Metrics built on training data (default is False)
:param on_valid: Metrics built on validation data (default is False)
:param on_xval: Metrics built on cross validation data (default is False)
:param algo: The algorithm the metrics are based off of (e.g. deeplearning, gbm, etc.)
:return: A new H2OBinomialModelMetrics object.
"""
super(H2OBinomialModelMetrics, self).__init__(metric_json, on, algo)
def F1(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The F1 for the given set of thresholds.
"""
return self.metric("f1", thresholds=thresholds)
def F2(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The F2 for this set of metrics and thresholds
"""
return self.metric("f2", thresholds=thresholds)
def F0point5(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The F0point5 for this set of metrics and thresholds.
"""
return self.metric("f0point5", thresholds=thresholds)
def accuracy(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The accuracy for this set of metrics and thresholds
"""
return self.metric("accuracy", thresholds=thresholds)
def error(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The error for this set of metrics and thresholds.
"""
return 1 - self.metric("accuracy", thresholds=thresholds)
def precision(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The precision for this set of metrics and thresholds.
"""
return self.metric("precision", thresholds=thresholds)
def tpr(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The True Positive Rate
"""
return self.metric("tpr", thresholds=thresholds)
def tnr(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The True Negative Rate
"""
return self.metric("tnr", thresholds=thresholds)
def fnr(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The False Negative Rate
"""
return self.metric("fnr", thresholds=thresholds)
def fpr(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The False Positive Rate
"""
return self.metric("fpr", thresholds=thresholds)
def recall(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: Recall for this set of metrics and thresholds
"""
return self.metric("tpr", thresholds=thresholds)
def sensitivity(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: Sensitivity or True Positive Rate for this set of metrics and thresholds
"""
return self.metric("tpr", thresholds=thresholds)
def fallout(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The fallout or False Positive Rate for this set of metrics and thresholds
"""
return self.metric("fpr", thresholds=thresholds)
def missrate(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The missrate or False Negative Rate.
"""
return self.metric("fnr", thresholds=thresholds)
def specificity(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The specificity or True Negative Rate.
"""
return self.metric("tnr", thresholds=thresholds)
def mcc(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The absolute MCC (a value between 0 and 1, 0 being totally dissimilar, 1 being identical)
"""
return self.metric("absolute_MCC", thresholds=thresholds)
def max_per_class_error(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: Return 1 - min_per_class_accuracy
"""
return 1-self.metric("min_per_class_accuracy", thresholds=thresholds)
def metric(self, metric, thresholds=None):
"""
:param metric: The desired metric
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The set of metrics for the list of thresholds
"""
if not thresholds: thresholds=[self.find_threshold_by_max_metric(metric)]
if not isinstance(thresholds,list):
raise ValueError("thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99])")
thresh2d = self._metric_json['thresholds_and_metric_scores']
midx = thresh2d.col_header.index(metric)
metrics = []
for t in thresholds:
idx = self.find_idx_by_threshold(t)
row = thresh2d.cell_values[idx]
metrics.append([t,row[midx]])
return metrics
def plot(self, type="roc", **kwargs):
"""
Produce the desired metric plot
:param type: the type of metric plot (currently, only ROC is supported)
:param server: if True, the plot is generated but not displayed (matplotlib's show method is blocking)
:return: None
"""
# check for matplotlib. exit if absent.
try:
imp.find_module('matplotlib')
import matplotlib
if 'server' in kwargs.keys() and kwargs['server']: matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
except ImportError:
print "matplotlib is required for this function!"
return
# TODO: add more types (i.e. cutoffs)
if type not in ["roc"]: raise ValueError("type {} is not supported".format(type))
if type == "roc":
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('ROC Curve')
plt.text(0.5, 0.5, r'AUC={0:.4f}'.format(self._metric_json["AUC"]))
plt.plot(self.fprs, self.tprs, 'b--')
plt.axis([0, 1, 0, 1])
if not ('server' in kwargs.keys() and kwargs['server']): plt.show()
@property
def fprs(self):
"""
Return all false positive rates for all threshold values.
:return: a list of false positive rates.
"""
fpr_idx = self._metric_json["thresholds_and_metric_scores"].col_header.index("fpr")
fprs = [x[fpr_idx] for x in self._metric_json["thresholds_and_metric_scores"].cell_values]
return fprs
@property
def tprs(self):
"""
Return all true positive rates for all threshold values.
:return: a list of true positive rates.
"""
tpr_idx = self._metric_json["thresholds_and_metric_scores"].col_header.index("tpr")
tprs = [y[tpr_idx] for y in self._metric_json["thresholds_and_metric_scores"].cell_values]
return tprs
def confusion_matrix(self, metrics=None, thresholds=None):
"""
Get the confusion matrix for the specified metric
:param metrics: A string (or list of strings) in {"min_per_class_accuracy", "absolute_MCC", "precision", "accuracy", "f0point5", "f2", "f1"}
:param thresholds: A value (or list of values) between 0 and 1
:return: a list of ConfusionMatrix objects (if there are more than one to return), or a single ConfusionMatrix (if there is only one)
"""
# make lists out of metrics and thresholds arguments
if metrics is None and thresholds is None: metrics = ["f1"]
if isinstance(metrics, list): metrics_list = metrics
elif metrics is None: metrics_list = []
else: metrics_list = [metrics]
if isinstance(thresholds, list): thresholds_list = thresholds
elif thresholds is None: thresholds_list = []
else: thresholds_list = [thresholds]
# error check the metrics_list and thresholds_list
if not all(isinstance(t, (int, float, long)) for t in thresholds_list) or \
not all(0 <= t <= 1 for t in thresholds_list):
raise ValueError("All thresholds must be numbers between 0 and 1 (inclusive).")
if not all(m in ["min_per_class_accuracy", "absolute_MCC", "precision", "accuracy", "f0point5", "f2", "f1"] for m in metrics_list):
raise ValueError("The only allowable metrics are min_per_class_accuracy, absolute_MCC, precision, accuracy, f0point5, f2, f1")
# make one big list that combines the thresholds and metric-thresholds
metrics_thresholds = [self.find_threshold_by_max_metric(m) for m in metrics_list]
for mt in metrics_thresholds:
thresholds_list.append(mt)
thresh2d = self._metric_json['thresholds_and_metric_scores']
actual_thresholds = [float(e[0]) for i,e in enumerate(thresh2d.cell_values)]
cms = []
for t in thresholds_list:
idx = self.find_idx_by_threshold(t)
row = thresh2d.cell_values[idx]
tns = row[8]
fns = row[9]
fps = row[10]
tps = row[11]
p = tps + fns
n = tns + fps
c0 = n - fps
c1 = p - tps
if t in metrics_thresholds:
m = metrics_list[metrics_thresholds.index(t)]
table_header = "Confusion Matrix (Act/Pred) for max " + m + " @ threshold = " + str(actual_thresholds[idx])
else: table_header = "Confusion Matrix (Act/Pred) @ threshold = " + str(actual_thresholds[idx])
cms.append(ConfusionMatrix(cm=[[c0,fps],[c1,tps]], domains=self._metric_json['domain'],
table_header=table_header))
if len(cms) == 1: return cms[0]
else: return cms
def find_threshold_by_max_metric(self,metric):
"""
:param metric: A string in {"min_per_class_accuracy", "absolute_MCC", "precision", "accuracy", "f0point5", "f2", "f1"}
:return: the threshold at which the given metric is maximum.
"""
crit2d = self._metric_json['max_criteria_and_metric_scores']
for e in crit2d.cell_values:
if e[0]=="max "+metric:
return e[1]
raise ValueError("No metric "+str(metric))
def find_idx_by_threshold(self,threshold):
"""
Retrieve the index in this metric's threshold list at which the given threshold is located.
:param threshold: Find the index of this input threshold.
:return: Return the index or throw a ValueError if no such index can be found.
"""
if not isinstance(threshold,float):
raise ValueError("Expected a float but got a " + str(type(threshold)))
thresh2d = self._metric_json['thresholds_and_metric_scores']
for i,e in enumerate(thresh2d.cell_values):
t = float(e[0])
if abs(t-threshold) < 0.00000001 * max(t,threshold):
return i
if threshold >= 0 and threshold <= 1:
thresholds = [float(e[0]) for i,e in enumerate(thresh2d.cell_values)]
threshold_diffs = [abs(t - threshold) for t in thresholds]
closest_idx = threshold_diffs.index(min(threshold_diffs))
closest_threshold = thresholds[closest_idx]
print "Could not find exact threshold {0}; using closest threshold found {1}." \
.format(threshold, closest_threshold)
return closest_idx
raise ValueError("Threshold must be between 0 and 1, but got {0} ".format(threshold))
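# A minimal, hypothetical sketch of the threshold-based API above (illustration only; assumes
# `perf` is an H2OBinomialModelMetrics instance from a binomial model's performance call):
#
#   best_f1_threshold = perf.find_threshold_by_max_metric("f1")
#   print perf.F1(thresholds=[best_f1_threshold])            # [[threshold, F1]]
#   print perf.metric("precision", thresholds=[0.5])         # [[0.5, precision]]
#   print perf.confusion_matrix(metrics=["f1", "accuracy"])  # list of ConfusionMatrix objects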
class H2OAutoEncoderModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2OAutoEncoderModelMetrics, self).__init__(metric_json, on, algo)
class H2ODimReductionModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2ODimReductionModelMetrics, self).__init__(metric_json, on, algo)
def num_err(self):
"""
:return: the Sum of Squared Error over non-missing numeric entries, or None if not present.
"""
if MetricsBase._has(self._metric_json, "numerr"):
return self._metric_json["numerr"]
return None
def cat_err(self):
"""
:return: the Number of Misclassified categories over non-missing categorical entries, or None if not present.
"""
if MetricsBase._has(self._metric_json, "caterr"):
return self._metric_json["caterr"]
return None
|
apache-2.0
|
zhmz90/hep_ml
|
hep_ml/losses.py
|
3
|
36874
|
"""
**hep_ml.losses** contains different loss functions to use in gradient boosting.
Apart from standard classification losses, **hep_ml** contains losses for uniform classification
(see :class:`BinFlatnessLossFunction`, :class:`KnnFlatnessLossFunction`, :class:`KnnAdaLossFunction`)
and for ranking (see :class:`RankBoostLossFunction`)
**Interface**
Loss functions inside **hep_ml** are stateful estimators and require initial fitting,
which is done automatically inside gradient boosting.
All loss function should be derived from AbstractLossFunction and implement this interface.
Examples
________
Training gradient boosting, optimizing LogLoss and using all features
>>> from hep_ml.gradientboosting import UGradientBoostingClassifier, LogLossFunction
>>> classifier = UGradientBoostingClassifier(loss=LogLossFunction(), n_estimators=100)
>>> classifier.fit(X, y, sample_weight=sample_weight)
Using composite loss function and subsampling:
>>> loss = CompositeLossFunction()
>>> classifier = UGradientBoostingClassifier(loss=loss, subsample=0.5)
To get uniform predictions in mass for background (note that mass should not be present in the training features):
>>> loss = BinFlatnessLossFunction(uniform_features=['mass'], uniform_label=0)
>>> classifier = UGradientBoostingClassifier(loss=loss, train_features=['pt', 'flight_time'])
To get uniform predictions in both signal and background:
>>> loss = BinFlatnessLossFunction(uniform_features=['mass'], uniform_label=[0, 1])
>>> classifier = UGradientBoostingClassifier(loss=loss, train_features=['pt', 'flight_time'])
"""
from __future__ import division, print_function, absolute_import
import numbers
import warnings
import numpy
import pandas
from scipy import sparse
from scipy.special import expit
from sklearn.utils.validation import check_random_state
from sklearn.base import BaseEstimator
from .commonutils import compute_knn_indices_of_signal, check_sample_weight, check_uniform_label, weighted_quantile
from .metrics_utils import bin_to_group_indices, compute_bin_indices, compute_group_weights, \
group_indices_to_groups_matrix
__author__ = 'Alex Rogozhnikov'
__all__ = [
'AbstractLossFunction',
'MSELossFunction',
'MAELossFunction',
'LogLossFunction',
'AdaLossFunction',
'CompositeLossFunction',
'BinFlatnessLossFunction',
'KnnFlatnessLossFunction',
'KnnAdaLossFunction',
'RankBoostLossFunction'
]
def _compute_positions(y_pred, sample_weight):
"""
For each event, computes its position among other events by prediction.
position = (weighted) fraction of elements with lower predictions => position belongs to [0, 1]
This function is very close to `scipy.stats.rankdata`, but supports weights.
"""
order = numpy.argsort(y_pred)
ordered_weights = sample_weight[order]
ordered_weights /= float(numpy.sum(ordered_weights))
efficiencies = (numpy.cumsum(ordered_weights) - 0.5 * ordered_weights)
return efficiencies[numpy.argsort(order)]
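# A small numeric illustration of _compute_positions (a sketch, not part of the library API):
# with unit weights it reproduces (scipy.stats.rankdata(y_pred) - 0.5) / len(y_pred).
#
#   _compute_positions(numpy.array([0.1, 0.5, 0.3]), numpy.ones(3))
#   # -> array([ 0.1666...,  0.8333...,  0.5])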
class AbstractLossFunction(BaseEstimator):
"""
This is base class for loss functions used in `hep_ml`.
Main differences compared to `scikit-learn` loss functions:
1. losses are stateful, and may require fitting of training data before usage.
2. thus, when computing gradient, hessian, one shall provide predictions of all events.
3. losses are objects that shall be passed as estimators to gradient boosting (see examples).
4. only two-class case is supported, and different classes may have different role and meaning.
"""
def fit(self, X, y, sample_weight):
""" This method is optional, it is called before all the others."""
return self
def negative_gradient(self, y_pred):
"""The y_pred should contain all the events passed to `fit` method,
moreover, the order should be the same"""
raise NotImplementedError()
def __call__(self, y_pred):
"""The y_pred should contain all the events passed to `fit` method,
moreover, the order should be the same"""
raise NotImplementedError()
def prepare_tree_params(self, y_pred):
"""Prepares parameters for regression tree that minimizes MSE
:param y_pred: contains predictions for all the events passed to `fit` method,
moreover, the order should be the same
:return: tuple (tree_target, tree_weight) with target and weight to be used in decision tree
"""
return self.negative_gradient(y_pred), numpy.ones(len(y_pred))
def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):
"""
Method for pruning. Loss function can prepare better values for leaves
:param terminal_regions: indices of terminal regions of each event.
:param leaf_values: numpy.array, current mapping of leaf indices to prediction values.
:param y_pred: predictions before adding new tree.
:return: numpy.array with new prediction values for all leaves.
"""
return leaf_values
def compute_optimal_step(self, y_pred):
"""
Compute optimal global step. This method is typically used to make optimal step
before fitting trees to reduce variance.
:param y_pred: initial predictions, numpy.array of shape [n_samples]
:return: float
"""
return 0.
class HessianLossFunction(AbstractLossFunction):
"""Loss function with a diagonal hessian; uses a Newton-Raphson step to update the trees. """
def __init__(self, regularization=5.):
"""
:param regularization: float, penalty for leaves with few events,
corresponds roughly to the number of added events of both classes to each leaf.
"""
self.regularization = regularization
def fit(self, X, y, sample_weight):
self.regularization_ = self.regularization * numpy.mean(sample_weight)
return self
def hessian(self, y_pred):
""" Returns diagonal of hessian matrix.
:param y_pred: numpy.array of shape [n_samples] with events passed in the same order as in `fit`.
:return: numpy.array of shape [n_samples] with second derivatives with respect to each prediction.
"""
raise NotImplementedError('Override this method in loss function.')
def prepare_tree_params(self, y_pred):
grad = self.negative_gradient(y_pred)
hess = self.hessian(y_pred) + 0.01
return grad / hess, hess
def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):
""" This expression comes from optimization of second-order approximation of loss function."""
min_length = len(leaf_values)
nominators = numpy.bincount(terminal_regions, weights=self.negative_gradient(y_pred), minlength=min_length)
denominators = numpy.bincount(terminal_regions, weights=self.hessian(y_pred), minlength=min_length)
return nominators / (denominators + self.regularization_)
def compute_optimal_step(self, y_pred):
"""
Optimal step is computed using Newton-Raphson algorithm (10 iterations).
:param y_pred: predictions (usually, zeros)
:return: float
"""
terminal_regions = numpy.zeros(len(y_pred), dtype='int')
leaf_values = numpy.zeros(shape=1)
step = 0.
for _ in range(10):
step_ = self.prepare_new_leaves_values(terminal_regions, leaf_values=leaf_values, y_pred=y_pred + step)[0]
step += 0.5 * step_
return step
# region Classification losses
class AdaLossFunction(HessianLossFunction):
""" AdaLossFunction is the same as Exponential Loss Function (aka exploss) """
def fit(self, X, y, sample_weight):
self.sample_weight = check_sample_weight(y, sample_weight=sample_weight,
normalize=True, normalize_by_class=True)
self.y_signed = 2 * y - 1
HessianLossFunction.fit(self, X, y, sample_weight=self.sample_weight)
return self
def __call__(self, y_pred):
return numpy.sum(self.sample_weight * numpy.exp(- self.y_signed * y_pred))
def negative_gradient(self, y_pred):
return self.y_signed * self.sample_weight * numpy.exp(- self.y_signed * y_pred)
def hessian(self, y_pred):
return self.sample_weight * numpy.exp(- self.y_signed * y_pred)
def prepare_tree_params(self, y_pred):
return self.y_signed, self.hessian(y_pred)
class LogLossFunction(HessianLossFunction):
"""Logistic loss function (logloss), aka binomial deviance, aka cross-entropy,
aka log-likelihood loss.
"""
def fit(self, X, y, sample_weight):
self.sample_weight = check_sample_weight(y, sample_weight=sample_weight,
normalize=True, normalize_by_class=True)
self.y_signed = 2 * y - 1
HessianLossFunction.fit(self, X, y, sample_weight=self.sample_weight)
return self
def __call__(self, y_pred):
return numpy.sum(self.sample_weight * numpy.logaddexp(0, - self.y_signed * y_pred))
def negative_gradient(self, y_pred):
return self.y_signed * self.sample_weight * expit(- self.y_signed * y_pred)
def hessian(self, y_pred):
expits = expit(self.y_signed * y_pred)
return self.sample_weight * expits * (1 - expits)
def prepare_tree_params(self, y_pred):
return self.y_signed * expit(- self.y_signed * y_pred), self.sample_weight
class CompositeLossFunction(HessianLossFunction):
"""
Composite loss function is defined as exploss for background events and logloss for signal, with proper constants.
This kind of loss function is very useful to optimize AMS or in situations where a very clean signal is expected.
"""
def fit(self, X, y, sample_weight):
self.y = y
self.sample_weight = check_sample_weight(y, sample_weight=sample_weight,
normalize=True, normalize_by_class=True)
self.y_signed = 2 * y - 1
self.sig_w = (y == 1) * self.sample_weight
self.bck_w = (y == 0) * self.sample_weight
HessianLossFunction.fit(self, X, y, sample_weight=self.sample_weight)
return self
def __call__(self, y_pred):
result = numpy.sum(self.sig_w * numpy.logaddexp(0, -y_pred))
result += numpy.sum(self.bck_w * numpy.exp(0.5 * y_pred))
return result
def negative_gradient(self, y_pred):
result = self.sig_w * expit(- y_pred)
result -= 0.5 * self.bck_w * numpy.exp(0.5 * y_pred)
return result
def hessian(self, y_pred):
expits = expit(- y_pred)
return self.sig_w * expits * (1 - expits) + self.bck_w * 0.25 * numpy.exp(0.5 * y_pred)
# endregion
# region Regression Losses
class MSELossFunction(HessianLossFunction):
r""" Mean squared error loss function, used for regression.
:math:`\text{loss} = \sum_i (y_i - \hat{y}_i)^2`
"""
def fit(self, X, y, sample_weight):
self.y = y
self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True)
HessianLossFunction.fit(self, X, y, sample_weight=sample_weight)
return self
def __call__(self, y_pred):
return 0.5 * numpy.sum(self.sample_weight * (self.y - y_pred) ** 2)
def negative_gradient(self, y_pred):
return self.sample_weight * (self.y - y_pred)
def hessian(self, y_pred):
return self.sample_weight
def prepare_tree_params(self, y_pred):
return self.y - y_pred, self.sample_weight
def compute_optimal_step(self, y_pred):
return numpy.average(self.y - y_pred, weights=self.sample_weight)
class MAELossFunction(AbstractLossFunction):
r""" Mean absolute error loss function, used for regression.
:math:`\text{loss} = \sum_i |y_i - \hat{y}_i|`
"""
def fit(self, X, y, sample_weight):
self.y = y
self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True)
return self
def __call__(self, y_pred):
return 0.5 * numpy.sum(self.sample_weight * numpy.abs(self.y - y_pred))
def negative_gradient(self, y_pred):
return self.sample_weight * numpy.sign(self.y - y_pred)
def prepare_tree_params(self, y_pred):
return numpy.sign(self.y - y_pred), self.sample_weight
def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):
# TODO use weighted median
new_leaf_values = numpy.zeros(len(leaf_values), dtype='float')
target = (self.y - y_pred)
for terminal_region in range(len(leaf_values)):
values = target[terminal_regions == terminal_region]
values = numpy.insert(values, [0], [0])
new_leaf_values[terminal_region] = numpy.median(values)
return new_leaf_values
def compute_optimal_step(self, y_pred):
return weighted_quantile(self.y - y_pred, quantiles=[0.5], sample_weight=self.sample_weight)[0]
# endregion RegressionLosses
class RankBoostLossFunction(HessianLossFunction):
def __init__(self, request_column, penalty_power=1., update_iterations=1):
r"""RankBoostLossFunction is target of optimization in RankBoost [RB]_ algorithm,
which was developed for ranking and introduces penalties for wrong order of predictions.
However, this implementation goes further: optimal leaf values are selected by an iterative procedure.
It also uses a matrix decomposition of the loss function,
which is very effective when labels come from a small set (usually 0, 1, 2, 3, 4).
:math:`\text{loss} = \sum_{ij} w_{ij} exp(pred_i - pred_j)`,
:math:`w_{ij} = ( \alpha + \beta * [query_i = query_j]) R_{label_i, label_j}`, where
:math:`R_{ij} = 0` if :math:`i \leq j`, else :math:`R_{ij} = (i - j)^{p}`
:param str request_column: name of the column with search query ids. More attention is paid
to samples with the same query.
:param float penalty_power: describes dependence of penalty on the difference between target labels.
:param int update_iterations: number of minimization steps to provide optimal values.
.. [RB] Y. Freund et al. An Efficient Boosting Algorithm for Combining Preferences
"""
self.update_iterations = update_iterations
self.penalty_power = penalty_power
self.request_column = request_column
HessianLossFunction.__init__(self, regularization=0.1)
def fit(self, X, y, sample_weight):
self.queries = X[self.request_column]
self.y = y
self.possible_queries, normed_queries = numpy.unique(self.queries, return_inverse=True)
self.possible_ranks, normed_ranks = numpy.unique(self.y, return_inverse=True)
self.lookups = [normed_ranks, normed_queries * len(self.possible_ranks) + normed_ranks]
self.minlengths = [len(self.possible_ranks), len(self.possible_ranks) * len(self.possible_queries)]
self.rank_penalties = numpy.zeros([len(self.possible_ranks), len(self.possible_ranks)], dtype=float)
for r1 in self.possible_ranks:
for r2 in self.possible_ranks:
if r1 < r2:
self.rank_penalties[r1, r2] = (r2 - r1) ** self.penalty_power
self.penalty_matrices = []
self.penalty_matrices.append(self.rank_penalties / numpy.sqrt(1 + len(y)))
n_queries = numpy.bincount(normed_queries)
assert len(n_queries) == len(self.possible_queries)
self.penalty_matrices.append(
sparse.block_diag([self.rank_penalties * 1. / numpy.sqrt(1 + nq) for nq in n_queries]))
HessianLossFunction.fit(self, X, y, sample_weight=sample_weight)
def __call__(self, y_pred):
y_pred -= y_pred.mean()
pos_exponent = numpy.exp(y_pred)
neg_exponent = numpy.exp(-y_pred)
result = 0.
for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices):
pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length)
neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length)
result += pos_stats.T.dot(penalty_matrix.dot(neg_stats))
return result
def negative_gradient(self, y_pred):
y_pred -= y_pred.mean()
pos_exponent = numpy.exp(y_pred)
neg_exponent = numpy.exp(-y_pred)
gradient = numpy.zeros(len(y_pred), dtype=float)
for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices):
pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length)
neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length)
gradient += pos_exponent * penalty_matrix.dot(neg_stats)[lookup]
gradient -= neg_exponent * penalty_matrix.T.dot(pos_stats)[lookup]
return - gradient
def hessian(self, y_pred):
y_pred -= y_pred.mean()
pos_exponent = numpy.exp(y_pred)
neg_exponent = numpy.exp(-y_pred)
result = numpy.zeros(len(y_pred), dtype=float)
for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices):
pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length)
neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length)
result += pos_exponent * penalty_matrix.dot(neg_stats)[lookup]
result += neg_exponent * penalty_matrix.T.dot(pos_stats)[lookup]
return result
def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):
leaves_values = numpy.zeros(len(leaf_values))
for _ in range(self.update_iterations):
y_test = y_pred + leaves_values[terminal_regions]
new_leaves_values = self._prepare_new_leaves_values(terminal_regions, leaves_values, y_test)
leaves_values = 0.5 * new_leaves_values + leaves_values
return leaves_values
def _prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):
"""
For each event we shall represent loss as w_plus * e^{pred} + w_minus * e^{-pred},
then we are able to construct optimal step.
Note: this is not optimal, since we ignore
that some events belong to the same leaf.
"""
pos_exponent = numpy.exp(y_pred)
neg_exponent = numpy.exp(-y_pred)
w_plus = numpy.zeros(len(y_pred), dtype=float)
w_minus = numpy.zeros(len(y_pred), dtype=float)
for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices):
pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length)
neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length)
w_plus += penalty_matrix.dot(neg_stats)[lookup]
w_minus += penalty_matrix.T.dot(pos_stats)[lookup]
w_plus_leaf = numpy.bincount(terminal_regions, weights=w_plus * pos_exponent) + self.regularization
w_minus_leaf = numpy.bincount(terminal_regions, weights=w_minus * neg_exponent) + self.regularization
return 0.5 * numpy.log(w_minus_leaf / w_plus_leaf)
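# A minimal sketch of how this loss is driven inside gradient boosting (illustration only;
# 'query_id' is an assumed column name in a pandas DataFrame X holding search-query ids):
#
#   loss = RankBoostLossFunction(request_column='query_id', penalty_power=1.)
#   loss.fit(X, y, sample_weight=numpy.ones(len(y)))
#   gradient = loss.negative_gradient(numpy.zeros(len(y)))  # used to build the next regression tree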
# region MatrixLossFunction
class AbstractMatrixLossFunction(HessianLossFunction):
def __init__(self, uniform_features, regularization=5.):
r"""AbstractMatrixLossFunction is a base class to be inherited by other loss functions,
which choose the particular A matrix and w vector. The formula of loss is:
:math:`\text{loss} = \sum_i w_i \exp(- \sum_j a_{ij} y_j \text{score}_j)`
"""
self.uniform_features = uniform_features
# real matrix and vector will be computed during fitting
self.A = None
self.A_t = None
self.w = None
HessianLossFunction.__init__(self, regularization=regularization)
def fit(self, X, y, sample_weight):
"""This method is used to compute A matrix and w based on train dataset"""
assert len(X) == len(y), "different size of arrays"
A, w = self.compute_parameters(X, y, sample_weight)
self.A = sparse.csr_matrix(A)
self.A_t = sparse.csr_matrix(self.A.transpose())
self.A_t_sq = self.A_t.multiply(self.A_t)
self.w = numpy.array(w)
assert A.shape[0] == len(w), "inconsistent sizes"
assert A.shape[1] == len(X), "wrong size of matrix"
self.y_signed = numpy.array(2 * y - 1)
HessianLossFunction.fit(self, X, y, sample_weight=sample_weight)
return self
def __call__(self, y_pred):
"""Computing the loss itself"""
assert len(y_pred) == self.A.shape[1], "something is wrong with sizes"
exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred))
return numpy.sum(self.w * exponents)
def negative_gradient(self, y_pred):
"""Computing negative gradient"""
assert len(y_pred) == self.A.shape[1], "something is wrong with sizes"
exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred))
result = self.A_t.dot(self.w * exponents) * self.y_signed
return result
def hessian(self, y_pred):
assert len(y_pred) == self.A.shape[1], 'something wrong with sizes'
exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred))
result = self.A_t_sq.dot(self.w * exponents)
return result
def compute_parameters(self, trainX, trainY, trainW):
"""This method should be overloaded in descendant, and should return A, w (matrix and vector)"""
raise NotImplementedError()
def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):
exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred))
# current approach uses Newton-Raphson step
# TODO compare with iterative suboptimal choice of value, based on exp(a x) ~ a exp(x)
regions_matrix = sparse.csc_matrix((self.y_signed, [numpy.arange(len(self.y_signed)), terminal_regions]))
# Z is matrix of shape [n_exponents, n_terminal_regions]
# with contributions of each terminal region to each exponent
Z = self.A.dot(regions_matrix)
Z = Z.T
nominator = Z.dot(self.w * exponents)
denominator = Z.multiply(Z).dot(self.w * exponents)
return nominator / (denominator + 1e-5)
class KnnAdaLossFunction(AbstractMatrixLossFunction):
def __init__(self, uniform_features, uniform_label, knn=10, row_norm=1.):
r"""Modification of AdaLoss to achieve uniformity of predictions
:math:`\text{loss} = \sum_i w_i * exp(- \sum_j a_{ij} y_j score_j)`
The `A` matrix is square; each row corresponds to a single event in the training dataset. In each row we put ones
for the closest neighbours if this event is from a uniform class.
See [BU]_ for details.
:param list[str] uniform_features: the features, along which uniformity is desired
:param int|list[int] uniform_label: the label (labels) of 'uniform classes'
:param int knn: the number of nonzero elements in a row corresponding to an event of a 'uniform class'
.. [BU] A. Rogozhnikov et al, New approaches for boosting to uniformity
http://arxiv.org/abs/1410.4140
"""
self.knn = knn
self.row_norm = row_norm
self.uniform_label = check_uniform_label(uniform_label)
AbstractMatrixLossFunction.__init__(self, uniform_features)
def compute_parameters(self, trainX, trainY, trainW):
A_parts = []
w_parts = []
for label in self.uniform_label:
label_mask = numpy.array(trainY == label)
n_label = numpy.sum(label_mask)
knn_indices = compute_knn_indices_of_signal(trainX[self.uniform_features], label_mask, self.knn)
knn_indices = knn_indices[label_mask, :]
ind_ptr = numpy.arange(0, n_label * self.knn + 1, self.knn)
column_indices = knn_indices.flatten()
data = numpy.ones(n_label * self.knn, dtype=float) * self.row_norm / self.knn
A_part = sparse.csr_matrix((data, column_indices, ind_ptr), shape=[n_label, len(trainX)])
w_part = numpy.mean(numpy.take(trainW, knn_indices), axis=1)
assert A_part.shape[0] == len(w_part)
A_parts.append(A_part)
w_parts.append(w_part)
for label in set(trainY) - set(self.uniform_label):
label_mask = trainY == label
n_label = numpy.sum(label_mask)
ind_ptr = numpy.arange(0, n_label + 1)
column_indices = numpy.where(label_mask)[0].flatten()
data = numpy.ones(n_label, dtype=float) * self.row_norm
A_part = sparse.csr_matrix((data, column_indices, ind_ptr), shape=[n_label, len(trainX)])
w_part = trainW[label_mask]
A_parts.append(A_part)
w_parts.append(w_part)
A = sparse.vstack(A_parts, format='csr', dtype=float)
w = numpy.concatenate(w_parts)
assert A.shape == (len(trainX), len(trainX))
return A, w
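# A minimal usage sketch (hypothetical feature name 'mass'; mirrors the module docstring examples):
#
#   loss = KnnAdaLossFunction(uniform_features=['mass'], uniform_label=0, knn=10)
#   classifier = UGradientBoostingClassifier(loss=loss, n_estimators=100)
#   classifier.fit(X, y, sample_weight=sample_weight)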
# endregion
# region ReweightLossFunction
# Mathematically at each stage we
# 0. recompute weights
# 1. normalize ratio between distributions (negatives are in opposite distribution)
# 2. chi2 - changing only sign, weights are the same
# 3. optimal value: simply log as usual (negatives are in the same distribution with sign -)
class ReweightLossFunction(AbstractLossFunction):
def __init__(self, regularization=5.):
"""
Loss function used to reweight events. Conventions:
y=0 - target distribution, y=1 - original distribution.
After reweighting, the weights look like:
w = w_0 for target distribution
w = w_0 * exp(pred) for events from original distribution
(so pred for target distribution is ignored)
:param regularization: roughly, the number of events added to each leaf to prevent overfitting.
"""
self.regularization = regularization
def fit(self, X, y, sample_weight):
assert numpy.all(numpy.in1d(y, [0, 1]))
if sample_weight is None:
self.sample_weight = numpy.ones(len(X), dtype=float)
else:
self.sample_weight = numpy.array(sample_weight, dtype=float)
self.y = y
# signs encode transfer to the opposite distribution (events with negative weight are counted in the opposite distribution)
self.signs = (2 * y - 1) * numpy.sign(sample_weight)
self.mask_original = numpy.array(self.y)
self.mask_target = numpy.array(1 - self.y)
return self
def _compute_weights(self, y_pred):
"""We need renormalization at each step"""
weights = self.sample_weight * numpy.exp(self.y * y_pred)
return check_sample_weight(self.y, weights, normalize=True, normalize_by_class=True)
def __call__(self, *args, **kwargs):
""" Loss function doesn't have precise expression """
return 0
def negative_gradient(self, y_pred):
return 0.
def prepare_tree_params(self, y_pred):
return self.signs, numpy.abs(self._compute_weights(y_pred))
def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):
weights = self._compute_weights(y_pred)
w_target = numpy.bincount(terminal_regions, weights=self.mask_target * weights)
w_original = numpy.bincount(terminal_regions, weights=self.mask_original * weights)
# suppressing possibly negative samples
w_target = w_target.clip(0)
w_original = w_original.clip(0)
return numpy.log(w_target + self.regularization) - numpy.log(w_original + self.regularization)
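# Sketch of the reweighting convention described in the class docstring (illustration only):
# after boosting produces predictions `pred`, events of the original distribution (y == 1)
# are multiplied by exp(pred), while target-distribution weights stay unchanged
# (up to the per-class renormalization done in _compute_weights):
#
#   new_weights = w0 * numpy.exp(y * pred)   # equals w0 wherever y == 0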
# endregion
# region FlatnessLossFunction
def _exp_margin(margin):
""" margin = - y_signed * y_pred """
return numpy.exp(numpy.clip(margin, -1e5, 2))
class AbstractFlatnessLossFunction(AbstractLossFunction):
"""Base class for FlatnessLosses"""
def __init__(self, uniform_features, uniform_label, power=2., fl_coefficient=3.,
allow_wrong_signs=True):
self.uniform_features = uniform_features
if isinstance(uniform_label, numbers.Number):
self.uniform_label = numpy.array([uniform_label])
else:
self.uniform_label = numpy.array(uniform_label)
self.power = power
self.fl_coefficient = fl_coefficient
self.allow_wrong_signs = allow_wrong_signs
def fit(self, X, y, sample_weight=None):
sample_weight = check_sample_weight(y, sample_weight=sample_weight,
normalize=True, normalize_by_class=True)
assert len(X) == len(y), 'lengths are different'
X = pandas.DataFrame(X)
self.group_indices = dict()
self.group_matrices = dict()
self.group_weights = dict()
occurences = numpy.zeros(len(X))
for label in self.uniform_label:
self.group_indices[label] = self._compute_groups_indices(X, y, label=label)
self.group_matrices[label] = group_indices_to_groups_matrix(self.group_indices[label], len(X))
self.group_weights[label] = compute_group_weights(self.group_matrices[label], sample_weight=sample_weight)
for group in self.group_indices[label]:
occurences[group] += 1
out_of_bins = (occurences == 0) & numpy.in1d(y, self.uniform_label)
if numpy.mean(out_of_bins) > 0.01:
warnings.warn("%i events fall outside all bins" % numpy.sum(out_of_bins), UserWarning)
self.y = y
self.y_signed = 2 * y - 1
self.sample_weight = numpy.copy(sample_weight)
self.divided_weight = sample_weight / numpy.maximum(occurences, 1)
return self
def _compute_groups_indices(self, X, y, label):
raise NotImplementedError('To be overridden in descendants.')
def __call__(self, pred):
# the actual loss value does not play any role in the boosting optimization,
# so a constant 0 is returned here
return 0
def _compute_fl_derivatives(self, y_pred):
y_pred = numpy.ravel(y_pred)
neg_gradient = numpy.zeros(len(self.y), dtype=numpy.float)
for label in self.uniform_label:
label_mask = self.y == label
global_positions = numpy.zeros(len(y_pred), dtype=float)
global_positions[label_mask] = \
_compute_positions(y_pred[label_mask], sample_weight=self.sample_weight[label_mask])
for indices_in_bin in self.group_indices[label]:
local_pos = _compute_positions(y_pred[indices_in_bin],
sample_weight=self.sample_weight[indices_in_bin])
global_pos = global_positions[indices_in_bin]
bin_gradient = self.power * numpy.sign(local_pos - global_pos) * \
numpy.abs(local_pos - global_pos) ** (self.power - 1)
neg_gradient[indices_in_bin] += bin_gradient
neg_gradient *= self.divided_weight
# check that events outside the uniform classes are not touched
assert numpy.all(neg_gradient[~numpy.in1d(self.y, self.uniform_label)] == 0)
return neg_gradient
def negative_gradient(self, y_pred):
y_signed = self.y_signed
neg_gradient = self._compute_fl_derivatives(y_pred) * self.fl_coefficient
# adding ExpLoss
neg_gradient += y_signed * self.sample_weight * _exp_margin(-y_signed * y_pred)
if not self.allow_wrong_signs:
neg_gradient = y_signed * numpy.clip(y_signed * neg_gradient, 0, 1e5)
return neg_gradient
class BinFlatnessLossFunction(AbstractFlatnessLossFunction):
def __init__(self, uniform_features, uniform_label, n_bins=10, power=2., fl_coefficient=3.,
allow_wrong_signs=True):
r"""
This loss function contains separately penalty for non-flatness and for bad prediction quality.
See [FL]_ for details.
:math:`\text{loss} =\text{ExpLoss} + c \times \text{FlatnessLoss}`
FlatnessLoss computed using binning of uniform variables
:param list[str] uniform_features: names of features, along which we want to obtain uniformity of predictions
:param int|list[int] uniform_label: the label(s) of classes for which uniformity is desired
:param int n_bins: number of bins along each variable
:param float power: the loss contains the difference :math:`|F - F_{bin}|^p`, where p is the power
:param float fl_coefficient: multiplier for flatness_loss. Controls the tradeoff of quality vs uniformity.
:param bool allow_wrong_signs: defines whether the gradient may have a different sign from the "sign of the class"
(i.e. may have negative gradient on signal). If False, values will be clipped to zero.
.. [FL] A. Rogozhnikov et al, New approaches for boosting to uniformity
http://arxiv.org/abs/1410.4140
"""
self.n_bins = n_bins
AbstractFlatnessLossFunction.__init__(self, uniform_features,
uniform_label=uniform_label, power=power,
fl_coefficient=fl_coefficient,
allow_wrong_signs=allow_wrong_signs)
def _compute_groups_indices(self, X, y, label):
"""Returns a list, each element is events' indices in some group."""
label_mask = y == label
extended_bin_limits = []
for var in self.uniform_features:
f_min, f_max = numpy.min(X[var][label_mask]), numpy.max(X[var][label_mask])
extended_bin_limits.append(numpy.linspace(f_min, f_max, 2 * self.n_bins + 1))
groups_indices = list()
for shift in [0, 1]:
bin_limits = []
for axis_limits in extended_bin_limits:
bin_limits.append(axis_limits[1 + shift:-1:2])
bin_indices = compute_bin_indices(X.ix[:, self.uniform_features].values, bin_limits=bin_limits)
groups_indices += list(bin_to_group_indices(bin_indices, mask=label_mask))
return groups_indices
class KnnFlatnessLossFunction(AbstractFlatnessLossFunction):
def __init__(self, uniform_features, uniform_label, n_neighbours=100, power=2., fl_coefficient=3.,
max_groups=5000, allow_wrong_signs=True, random_state=42):
r"""
This loss function contains separately penalty for non-flatness and for bad prediction quality.
See [FL]_ for details.
:math:`\text{loss} = \text{ExpLoss} + c \times \text{FlatnessLoss}`
FlatnessLoss computed using nearest neighbors in space of uniform features
:param list[str] uniform_features: names of features, along which we want to obtain uniformity of predictions
:param int|list[int] uniform_label: the label(s) of classes for which uniformity is desired
:param int n_neighbours: number of neighbors used in flatness loss
:param float power: the loss contains the difference :math:`|F - F_{bin}|^p`, where p is the power
:param float fl_coefficient: multiplier for flatness_loss. Controls the tradeoff of quality vs uniformity.
:param bool allow_wrong_signs: defines whether the gradient may have a different sign from the "sign of the class"
(i.e. may have negative gradient on signal). If False, values will be clipped to zero.
:param int max_groups: to limit memory consumption when training sample is large,
we randomly pick this number of points with their members.
.. [FL] A. Rogozhnikov et al, New approaches for boosting to uniformity
http://arxiv.org/abs/1410.4140
"""
self.n_neighbours = n_neighbours
self.max_groups = max_groups
self.random_state = random_state
AbstractFlatnessLossFunction.__init__(self, uniform_features,
uniform_label=uniform_label, power=power,
fl_coefficient=fl_coefficient,
allow_wrong_signs=allow_wrong_signs)
def _compute_groups_indices(self, X, y, label):
mask = y == label
self.random_state = check_random_state(self.random_state)
knn_indices = compute_knn_indices_of_signal(X[self.uniform_features], mask,
n_neighbours=self.n_neighbours)[mask, :]
if len(knn_indices) > self.max_groups:
selected_group = self.random_state.choice(len(knn_indices), size=self.max_groups, replace=False)
return knn_indices[selected_group, :]
else:
return knn_indices
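# A minimal usage sketch, analogous to the BinFlatnessLossFunction examples in the module
# docstring (hypothetical feature name 'mass'):
#
#   loss = KnnFlatnessLossFunction(uniform_features=['mass'], uniform_label=0, n_neighbours=100)
#   classifier = UGradientBoostingClassifier(loss=loss)
#   classifier.fit(X, y, sample_weight=sample_weight)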
# endregion
|
apache-2.0
|
alistairlow/tensorflow
|
tensorflow/examples/learn/text_classification.py
|
8
|
6685
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def estimator_spec_for_softmax_classification(
logits, labels, mode):
"""Returns EstimatorSpec instance for softmax classification."""
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def bag_of_words_model(features, labels, mode):
"""A bag-of-words model. Note it disregards the word order in the text."""
bow_column = tf.feature_column.categorical_column_with_identity(
WORDS_FEATURE, num_buckets=n_words)
bow_embedding_column = tf.feature_column.embedding_column(
bow_column, dimension=EMBEDDING_SIZE)
bow = tf.feature_column.input_layer(
features,
feature_columns=[bow_embedding_column])
logits = tf.layers.dense(bow, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
def rnn_model(features, labels, mode):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
# Split into list of embedding per word, while removing doc length dim.
# word_list ends up being a list of MAX_DOCUMENT_LENGTH tensors of shape [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.nn.rnn_cell.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Network of length
# MAX_DOCUMENT_LENGTH and pass word_list as the inputs for each unit.
_, encoding = tf.nn.static_rnn(cell, word_list, dtype=tf.float32)
# Given the encoding of the RNN, take the encoding of the last step (i.e. the hidden
# state of the last step) and pass it as features for softmax
# classification over output classes.
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
def main(unused_argv):
global n_words
tf.logging.set_verbosity(tf.logging.INFO)
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.Series(dbpedia.train.data[:,1])
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.Series(dbpedia.test.data[:,1])
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_transform_train = vocab_processor.fit_transform(x_train)
x_transform_test = vocab_processor.transform(x_test)
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
# Subtract 1 because VocabularyProcessor outputs a word-id matrix where word
# ids start from 1 and 0 means 'no word'. But
# categorical_column_with_identity assumes 0-based count and uses -1 for
# missing word.
x_train -= 1
x_test -= 1
model_fn = bag_of_words_model
classifier = tf.estimator.Estimator(model_fn=model_fn)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
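# Example invocations (file name taken from this example's path; both flags are defined above):
#   python text_classification.py                        # RNN model on the DBpedia dataset
#   python text_classification.py --bow_model            # bag-of-words model instead of the RNN
#   python text_classification.py --test_with_fake_data  # quick smoke test on fake data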
|
apache-2.0
|
elkingtonmcb/scikit-learn
|
examples/text/hashing_vs_dict_vectorizer.py
|
284
|
3265
|
"""
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
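# Example invocation (the optional argument overrides the default of 2**18 hashing features):
#   python hashing_vs_dict_vectorizer.py 262144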
|
bsd-3-clause
|
miaecle/deepchem
|
examples/factors/FACTORS_correlations.py
|
8
|
1407
|
"""
Script that computes correlations of FACTORS tasks.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import tempfile
import shutil
import deepchem as dc
import pandas as pd
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from FACTORS_datasets import load_factors
###Load data###
shard_size = 2000
print("About to load FACTORS data.")
FACTORS_tasks, datasets, transformers = load_factors(shard_size=shard_size)
train_dataset, valid_dataset, test_dataset = datasets
y_train = train_dataset.y
n_tasks = y_train.shape[1]
all_results = []
for task in range(n_tasks):
y_task = y_train[:, task]
for other_task in range(n_tasks):
if task == other_task:
continue
y_other = y_train[:, other_task]
r2 = dc.metrics.pearson_r2_score(y_task, y_other)
print("r2 for %s-%s is %f" % (task, other_task, r2))
all_results.append(r2)
# the histogram of the data
n, bins, patches = plt.hist(np.array(all_results), 50, density=True, stacked=True,
                            facecolor='green', alpha=0.75)
plt.xlabel('Cross-task Correlations')
plt.ylabel('Probability Density')
plt.title('Histogram of Factors Intertask Correlations')
plt.grid(True)
plt.savefig("Factors_correlations.png")
|
mit
|
flohorovicic/pynoddy
|
pynoddy/history.py
|
1
|
84271
|
# coding=utf-8
"""Noddy history file wrapper
Created on 24/03/2014
@author: Florian Wellmann
"""
import time # for header in model generation
import numpy as np
# import numpy as np
# import matplotlib.pyplot as plt
from . import events
class NoddyHistory(object):
"""Class container for Noddy history files"""
def __init__(self, history=None, **kwds):
"""Methods to analyse and change Noddy history files
**Arguments**:
- *history* = string : Name of Noddy history file
**Optional Keywords**:
            - *url* = url : link to history file on web (e.g. to download
            and open directly from the Atlas of Structural Geophysics,
            http://virtualexplorer.com.au/special/noddyatlas/index.html)
            - *verbose* = True if this function should print output to the print stream. Default is False.
Note: if both a (local) history is given and a URL, the local
file is opened!
"""
vb = kwds.get('verbose', False)
if history is None:
if "url" in kwds:
self.load_history_from_url(kwds['url'])
self.determine_events(verbose=vb)
else:
# generate a new history
self.create_new_history()
else:
# load existing history
self.load_history(history)
self.determine_events(verbose=vb)
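    # Usage sketch (file name and URL below are illustrative placeholders):
    #   his = NoddyHistory("two_faults.his")  # load a local history file
    #   his = NoddyHistory(url="http://tectonique.net/asg/some_model.his")  # or fetch from the web
    #   his.info()  # print extent, origin and the event list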
def __repr__(self):
"""Print out model information"""
return self.get_info_string()
def info(self, **kwds):
"""Print out model information
**Optional keywords**:
- *events_only* = bool : only information on events
"""
print(self.get_info_string(**kwds))
def get_info_string(self, **kwds):
"""Get model information as string
**Optional keywords**:
- *events_only* = bool : only information on events
"""
events_only = kwds.get("events_only", False)
local_os = ""
if not events_only:
# First: check if all information available
if not hasattr(self, 'extent_x'): self.get_extent()
if not hasattr(self, 'origin_x'): self.get_origin()
if not hasattr(self, 'cube_size'): self.get_cube_size()
if not hasattr(self, 'filename'): self.get_filename()
if not hasattr(self, 'date_saved'): self.get_date_saved()
local_os += (60 * "*" + "\n\t\t\tModel Information\n" + 60 * "*")
local_os += "\n\n"
if self.n_events == 0:
local_os += "The model does not yet contain any events\n"
else:
local_os += ("This model consists of %d events:\n" % self.n_events)
for k, ev in list(self.events.items()):
local_os += ("\t(%d) - %s\n" % (k, ev.event_type))
if not events_only:
local_os += "The model extent is:\n"
local_os += ("\tx - %.1f m\n" % self.extent_x)
local_os += ("\ty - %.1f m\n" % self.extent_y)
local_os += ("\tz - %.1f m\n" % self.extent_z)
local_os += 'Number of cells in each direction:\n'
local_os += ("\tnx = %d\n" % (self.extent_x / self.cube_size))
local_os += ("\tny = %d\n" % (self.extent_y / self.cube_size))
local_os += ("\tnz = %d\n" % (self.extent_z / self.cube_size))
local_os += ("The model origin is located at: \n\t(%.1f, %.1f, %.1f)\n" % (self.origin_x,
self.origin_y,
self.origin_z))
local_os += ("The cubesize for model export is: \n\t%d m\n" % self.cube_size)
# and now some metadata
local_os += "\n\n"
local_os += (60 * "*" + "\n\t\t\tMeta Data\n" + 60 * "*")
local_os += "\n\n"
local_os += ("The filename of the model is:\n\t%s\n" % self.filename)
local_os += ("It was last saved (if origin was a history file!) at:\n\t%s\n" % self.date_saved)
return local_os
def get_origin(self):
"""Get coordinates of model origin and return and store in local variables
**Returns**: (origin_x, origin_y, origin_z)
"""
# check if footer_lines exist (e.g. read in from file)
# if not: create from template
if not hasattr(self, "footer_lines"):
self.create_footer_from_template()
for i, line in enumerate(self.footer_lines):
if "Origin X" in line:
self.origin_x = float(self.footer_lines[i].split("=")[1])
self.origin_y = float(self.footer_lines[i + 1].split("=")[1])
self.origin_z = float(self.footer_lines[i + 2].split("=")[1])
break
return self.origin_x, self.origin_y, self.origin_z
def set_origin(self, origin_x, origin_y, origin_z):
"""Set coordinates of model origin and update local variables
**Arguments**:
- *origin_x* = float : x-location of model origin
- *origin_y* = float : y-location of model origin
- *origin_z* = float : z-location of model origin
"""
# check if footer_lines exist (e.g. read in from file)
# if not: create from template
if not hasattr(self, "footer_lines"):
self.create_footer_from_template()
self.origin_x = origin_x
self.origin_y = origin_y
self.origin_z = origin_z
origin_x_line = " Origin X = %.2f\n" % origin_x
origin_y_line = " Origin Y = %.2f\n" % origin_y
origin_z_line = " Origin Z = %.2f\n" % origin_z
for i, line in enumerate(self.footer_lines):
if "Origin X" in line:
self.footer_lines[i] = origin_x_line
self.footer_lines[i + 1] = origin_y_line
self.footer_lines[i + 2] = origin_z_line
break
def get_extent(self):
"""Get model extent and return and store in local variables
**Returns**: (extent_x, extent_y, extent_z)
"""
# check if footer_lines exist (e.g. read in from file)
# if not: create from template
if not hasattr(self, "footer_lines"):
self.create_footer_from_template()
for i, line in enumerate(self.footer_lines):
if "Length X" in line:
self.extent_x = float(self.footer_lines[i].split("=")[1])
self.extent_y = float(self.footer_lines[i + 1].split("=")[1])
self.extent_z = float(self.footer_lines[i + 2].split("=")[1])
break
return self.extent_x, self.extent_y, self.extent_z
def set_extent(self, extent_x, extent_y, extent_z):
"""Set model extent and update local variables
**Arguments**:
- *extent_x* = float : extent in x-direction
- *extent_y* = float : extent in y-direction
- *extent_z* = float : extent in z-direction
"""
# check if footer_lines exist (e.g. read in from file)
# if not: create from template
if not hasattr(self, "footer_lines"):
self.create_footer_from_template()
self.extent_x = extent_x
self.extent_y = extent_y
self.extent_z = extent_z
extent_x_line = " Length X = %.2f\n" % extent_x
extent_y_line = " Length Y = %.2f\n" % extent_y
extent_z_line = " Length Z = %.2f\n" % extent_z
for i, line in enumerate(self.footer_lines):
if "Length X" in line:
self.footer_lines[i] = extent_x_line
self.footer_lines[i + 1] = extent_y_line
self.footer_lines[i + 2] = extent_z_line
break
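    # Sketch of typical use of the getters/setters above (his is a NoddyHistory
    # instance; the values are illustrative):
    #   ox, oy, oz = his.get_origin()
    #   ex, ey, ez = his.get_extent()
    #   his.set_origin(0., 0., 0.)
    #   his.set_extent(10000., 10000., 5000.)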
def get_drillhole_data(self, x, y, **kwds):
"""Get geology values along 1-D profile at position x,y with a 1 m resolution
The following steps are performed:
1. creates a copy of the entire object,
2. sets values of origin, extent and geology cube size,
3. saves model to a temporary file,
4. runs Noddy on that file
5. opens and analyses output
6. deletes temporary files
Note: this method only works if write access to current directory
is enabled and noddy can be executed!
**Arguments**:
- *x* = float: x-position of drillhole
- *y* = float: y-position of drillhole
**Optional Arguments**:
- *z_min* = float : minimum depth of drillhole (default: model range)
- *z_max* = float : maximum depth of drillhole (default: model range)
- *resolution* = float : resolution along profile (default: 1 m)
"""
# resolve keywords
resolution = kwds.get("resolution", 1)
self.get_extent()
self.get_origin()
z_min = kwds.get("z_min", self.origin_z)
z_max = kwds.get("z_max", self.extent_z)
# 1. create copy
import copy
tmp_his = copy.deepcopy(self)
tmp_his.write_history("test.his")
# 2. set values
tmp_his.set_origin(x, y, z_min)
tmp_his.set_extent(resolution, resolution, z_max)
tmp_his.change_cube_size(resolution)
# 3. save temporary file
tmp_his_file = "tmp_1D_drillhole.his"
tmp_his.write_history(tmp_his_file)
tmp_out_file = "tmp_1d_out"
# 4. run noddy
import pynoddy
import pynoddy.output
pynoddy.compute_model(tmp_his_file, tmp_out_file)
# 5. open output
tmp_out = pynoddy.output.NoddyOutput(tmp_out_file)
# 6.
return tmp_out.block[0, 0, :]
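    # Sketch (his is a NoddyHistory instance; requires write access to the working
    # directory and a working Noddy executable; the coordinates are illustrative):
    #   geology_column = his.get_drillhole_data(x=1500., y=2500., resolution=10.)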
def load_history(self, history):
"""Load Noddy history
**Arguments**:
- *history* = string : Name of Noddy history file
"""
        with open(history, 'r') as history_file:
            self.history_lines = history_file.readlines()
# set flag for model loaded from file
self._from_file = True
# get footer lines
self.get_footer_lines()
def load_history_from_url(self, url):
"""Directly load a Noddy history from a URL
This method is useful to load a model from the Structural Geophysics
Atlas on the pages of the Virtual Explorer.
See: http://tectonique.net/asg
**Arguments**:
- *url* : url of history file
"""
        # test if python 2 or 3 is running for appropriate urllib functionality
import sys
if sys.version_info[0] < 3:
import urllib2
response = urllib2.urlopen(url)
tmp_lines = response.read().split("\n")
else:
# from urllib import urlopen # , urllib.error, urllib.parse
            import urllib.request
with urllib.request.urlopen(url) as f:
output = f.read().decode('utf-8')
# response = urllib.request.urlopen(url)
tmp_lines = output.split("\n")
# tmp_lines = response.read().decode("utf-8").split("\n")
self.history_lines = []
for line in tmp_lines:
# append EOL again for consistency
self.history_lines.append(line + "\n")
# set flag for model loaded from URL
self._from_url = True
# get footer lines
self.get_footer_lines()
def determine_model_stratigraphy(self):
"""Determine stratigraphy of entire model from all events"""
self.model_stratigraphy = []
for e in np.sort(list(self.events.keys())):
if self.events[e].event_type == 'STRATIGRAPHY':
self.model_stratigraphy += self.events[e].layer_names
if self.events[e].event_type == 'UNCONFORMITY':
self.model_stratigraphy += self.events[e].layer_names
            if self.events[e].event_type == 'DYKE':
                self.model_stratigraphy.append(self.events[e].name)
            if self.events[e].event_type == 'PLUG':
                self.model_stratigraphy.append(self.events[e].name)
def determine_events(self, **kwds):
"""Determine events and save line numbers
.. note:: Parsing of the history file is based on a fixed Noddy output order.
If this is, for some reason (e.g. in a changed version of Noddy) not the case, then
this parsing might fail!
**Optional Keywords**:
            - verbose = True if this function should write to the print buffer, otherwise False. Default is False.
"""
vb = kwds.get('verbose', False)
self._raw_events = []
for i, line in enumerate(self.history_lines):
if "No of Events" in line:
self.n_events = int(line.split("=")[1])
elif "Event #" in line:
event = {'type': line.split('=')[1].rstrip(), 'num': int(line[7:9]), 'line_start': i}
self._raw_events.append(event)
# finally: if the definition for BlockOptions starts, the event definition is over
elif "BlockOptions" in line:
last_event_stop = i - 2
# now: find the line ends for the single event blocks
for i, event in enumerate(self._raw_events[1:]):
self._raw_events[i]['line_end'] = event['line_start'] - 1
# now adjust for last event
self._raw_events[-1]['line_end'] = last_event_stop
self.events = {} # idea: create events as dictionary so that it is easier
# to swap order later!
# now create proper event objects for these events
if vb:
print("Loaded model with the following events:")
for e in self._raw_events:
event_lines = self.history_lines[e['line_start']:e['line_end'] + 1]
if vb:
print(e['type'])
if 'FAULT' in e['type']:
ev = events.Fault(lines=event_lines)
elif 'SHEAR_ZONE' in e['type']:
ev = events.Shear(lines=event_lines)
elif 'FOLD' in e['type']:
ev = events.Fold(lines=event_lines)
elif 'UNCONFORMITY' in e['type']:
ev = events.Unconformity(lines=event_lines)
elif 'STRATIGRAPHY' in e['type']:
# event_lines = event_lines[:-1]
ev = events.Stratigraphy(lines=event_lines)
elif 'TILT' in e['type']: # AK
ev = events.Tilt(lines=event_lines)
elif 'DYKE' in e['type']:
ev = events.Dyke(lines=event_lines)
elif 'PLUG' in e['type']:
ev = events.Plug(lines=event_lines)
elif 'STRAIN' in e['type']:
ev = events.Strain(lines=event_lines)
else:
print("Warning: event of type %s has not been implemented in PyNoddy yet" % e['type'])
continue
# now set shared attributes (those defined in superclass Event)
order = e['num'] # retrieve event number
self.events[order] = ev # store events sequentially
# determine overall begin and end of the history events
self.all_events_begin = self._raw_events[0]['line_start']
self.all_events_end = self._raw_events[-1]['line_end']
def copy_events(self):
"""Create a copy of the current event state"""
import copy
return copy.deepcopy(self.events)
def get_cube_size(self, **kwds):
"""Determine cube size for model export
**Optional Args**
            - type: choose geology or geophysics cube size to return. Should be either 'Geology' or 'Geophysics' (default)
"""
# get args
sim_type = kwds.get("type", 'Geophysics') # everything seems to use this
        cube_string = 'Geophysics Cube Size'  # get geophysics cube size by default
if 'Geology' in sim_type:
cube_string = 'Geology Cube Size' # instead get geology cube size
print(
"Warning: pynoddy uses the geophysics cube size for all calculations... changing the geology cube size will have no effect internally.")
# check if footer exists, if not: create from template
if not hasattr(self, "footer_lines"):
self.create_footer_from_template()
for line in self.footer_lines:
if cube_string in line:
self.cube_size = float(line.split('=')[1].rstrip())
return self.cube_size
def get_filename(self):
"""Determine model filename from history file/ header"""
self.filename = self.history_lines[0].split('=')[1].rstrip()
def get_date_saved(self):
"""Determine the last savepoint of the file"""
self.date_saved = self.history_lines[1].split('=')[1].rstrip()
def change_cube_size(self, cube_size):
"""Change the model cube size (isotropic)
**Arguments**:
- *cube_size* = float : new model cube size
"""
# check if footer_lines exist (e.g. read in from file)
# if not: create from template
if not hasattr(self, "footer_lines"):
self.create_footer_from_template()
# lines_new = self.history_lines[:]
for i, line in enumerate(self.footer_lines):
if "Geophysics Cube Size" in line: # correct line, make change
l = line.split('=')
l_new = '%7.2f\r\n' % cube_size
line_new = l[0] + "=" + l_new
self.footer_lines[i] = line_new
if "Geology Cube Size" in line: # change geology cube size also
l = line.split('=')
l_new = '%7.2f\r\n' % cube_size
line_new = l[0] + "=" + l_new
self.footer_lines[i] = line_new
# assign changed lines back to object
# self.history_lines = lines_new[:]
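    # Sketch: his.change_cube_size(50.) sets both the geology and the geophysics cube
    # size to 50 m, which controls the block resolution on model export.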
def get_footer_lines(self):
"""Get the footer lines from self.history_lines
The footer contains everything below events (all settings, etc.)"""
# get id of footer from history lines
for i, line in enumerate(self.history_lines):
if "#BlockOptions" in line:
break
self.footer_lines = self.history_lines[i:]
def create_footer_from_template(self):
"""Create model footer (with all settings) from template"""
self.footer_lines = []
for line in _Templates().footer.split("\n"):
line = line.replace(" ", "\t")
self.footer_lines.append(line + "\n")
def swap_events(self, event_num_1, event_num_2):
"""Swap two geological events in the timeline
**Arguments**:
- *event_num_1/2* = int : number of events to be swapped ("order")
"""
# events have to be copied, otherwise only a reference is passed!
event_tmp = self.events[event_num_1]
self.events[event_num_1] = self.events[event_num_2]
self.events[event_num_2] = event_tmp
self.update_event_numbers()
def reorder_events(self, reorder_dict):
"""Reorder events accoring to assignment in reorder_dict
**Arguments**:
- *reorder_dict* = dict : for example {1 : 2, 2 : 3, 3 : 1}
"""
tmp_events = self.events.copy()
for key, value in list(reorder_dict.items()):
try:
tmp_events[value] = self.events[key]
except KeyError:
print("Event with id %d is not defined, please check!" % value)
self.events = tmp_events.copy()
self.update_event_numbers()
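    # Sketch of reordering the event timeline (event numbers are illustrative):
    #   his.swap_events(2, 3)                   # exchange events 2 and 3
    #   his.reorder_events({1: 2, 2: 3, 3: 1})  # or assign new positions explicitly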
def update_event_numbers(self):
"""Update event numbers in 'Event #' line in noddy history file"""
for key, event in list(self.events.items()):
event.set_event_number(key)
def update_all_event_properties(self):
"""Update properties of all events - in case changes were made"""
for event in list(self.events.values()):
event.update_properties()
#
# class NewHistory():
# """Methods to create a Noddy model"""
#
def create_new_history(self):
"""Methods to create a Noddy model
"""
# set event counter
self.event_counter = 0
self.all_events_begin = 7 # default after header
self.all_events_end = 7
# initialise history lines
self.history_lines = []
self.events = {}
def get_ev_counter(self):
"""Event counter for implicit and continuous definition of events"""
self.event_counter += 1
return self.event_counter
def add_event(self, event_type, event_options, **kwds):
"""Add an event type to history
**Arguments**:
- *event_type* = string : type of event, legal options to date are:
'stratigraphy', 'fault', 'fold', 'unconformity'
- *event_options* = list : required options to create event (event dependent)
**Optional keywords**:
- *event_num* = int : event number (default: implicitly defined with increasing counter)
"""
event_num = kwds.get("event_num", self.get_ev_counter())
if event_type == 'stratigraphy':
ev = self._create_stratigraphy(event_options)
ev.event_type = 'STRATIGRAPHY'
elif event_type == 'fault':
ev = self._create_fault(event_options)
ev.event_type = 'FAULT'
elif event_type == 'tilt': # AK
ev = self._create_tilt(event_options)
ev.event_type = 'TILT'
elif event_type == 'unconformity': # AK
ev = self._create_unconformity(event_options)
ev.event_type = 'UNCONFORMITY'
elif event_type == 'fold':
ev = self._create_fold(event_options)
ev.event_type = 'FOLD'
else:
raise NameError('Event type %s not (yet) implemented' % event_type)
ev.set_event_number(event_num)
self.events[event_num] = ev
# update beginning and ending of events in history
self.all_events_end = self.all_events_end + len(ev.event_lines)
# add event to history lines, as well (for consistency with other methods)
        self.history_lines = self.history_lines[:self.all_events_begin] + \
                             ev.event_lines + \
                             self.history_lines[self.all_events_end:]
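    # Sketch of building a model from scratch (all option values are illustrative):
    #   his = NoddyHistory()
    #   his.add_event('stratigraphy', {'num_layers': 3,
    #                                  'layer_names': ['A', 'B', 'C'],
    #                                  'layer_thickness': [1000., 1000., 1000.]})
    #   his.add_event('fault', {'name': 'Fault_1', 'pos': (2000., 0., 1000.),
    #                           'dip_dir': 90., 'dip': 60., 'slip': 500.})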
def _create_header(self):
"""Create model header, include actual date"""
t = time.localtime() # get current time
time_string = "%d/%d/%d %d:%d:%d" % (t.tm_mday,
t.tm_mon,
t.tm_year,
t.tm_hour,
t.tm_min,
t.tm_sec)
self.header_lines = """#Filename = """ + self.filename + """
#Date Saved = """ + time_string + """
FileType = 111
Version = 7.03
"""
@staticmethod
def _create_stratigraphy(event_options):
"""Create a stratigraphy event
**Arguments**:
- *event_options* = list : list of required and optional settings for event
Options are:
'num_layers' = int : number of layers (required)
'layer_names' = list of strings : names for layers (default names otherwise)
'layer_thickness' = list of floats : thicknesses for all layers
"""
ev = events.Stratigraphy()
tmp_lines = [""]
tmp_lines.append("\tNum Layers\t= %d" % event_options['num_layers'])
for i in range(event_options['num_layers']):
"""Add stratigraphy layers"""
layer_name = event_options['layer_names'][i]
try:
density = event_options['density'][i]
except KeyError:
density = 4.0
cum_thickness = np.cumsum(event_options['layer_thickness'])
layer_lines = _Templates().strati_layer
# now replace required variables
layer_lines = layer_lines.replace("$NAME$", layer_name)
layer_lines = layer_lines.replace("$HEIGHT$", "%.1f" % cum_thickness[i])
layer_lines = layer_lines.replace(" ", "\t")
layer_lines = layer_lines.replace("$DENSITY$", "%e" % density)
# split lines and add to event lines list:
for layer_line in layer_lines.split("\n"):
tmp_lines.append(layer_line)
# append event name
tmp_lines.append("""\tName\t= Strat""")
# event lines are defined in list:
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
ev.num_layers = event_options['num_layers']
return ev
def _create_fault(self, event_options):
"""Create a fault event
**Arguments**:
- *event_options* = list : list of required and optional settings for event;
Options are:
'name' = string : name of fault event
'pos' = (x,y,z) : position of reference point (floats)
.. note:: for convenience, it is possible to assign 'top' to z
for position at "surface"
'dip_dir' = [0,360] : dip direction of fault
'dip' = [0,90] : dip angle of fault
'slip' = float : slip along fault
'geometry' = 'Translation', 'Curved' : geometry of fault plane (default: 'Translation')
'movement' = 'Hanging Wall', 'Foot Wall' : relative block movement (default: 'Hanging Wall')
'rotation' = float: fault rotation (default: 30.0)
'amplitude' = float: (default: 2000.0)
'radius' = float: (default: 1000.0)
'xaxis' = float: (default: 2000.0)
'yaxis' = float: (default: 2000.0)
'zaxis' = float: (default: 2000.0)
"""
ev = events.Fault()
tmp_lines = [""]
fault_lines = _Templates.fault
# substitute text with according values
fault_lines = fault_lines.replace("$NAME$", event_options['name'])
fault_lines = fault_lines.replace("$POS_X$", "%.1f" % event_options['pos'][0])
fault_lines = fault_lines.replace("$POS_Y$", "%.1f" % event_options['pos'][1])
        if event_options['pos'][2] == 'top':
# recalculate z-value to be at top of model
z = self.zmax
fault_lines = fault_lines.replace("$POS_Z$", "%.1f" % z)
else:
fault_lines = fault_lines.replace("$POS_Z$", "%.1f" % event_options['pos'][2])
fault_lines = fault_lines.replace("$DIP_DIR$", "%.1f" % event_options['dip_dir'])
fault_lines = fault_lines.replace("$DIP$", "%.1f" % event_options['dip'])
fault_lines = fault_lines.replace("$SLIP$", "%.1f" % event_options['slip'])
fault_lines = fault_lines.replace("$MOVEMENT$", "%s" % event_options.get('movement', 'Hanging Wall'))
fault_lines = fault_lines.replace("$GEOMETRY$", "%s" % event_options.get('geometry', 'Translation'))
fault_lines = fault_lines.replace("$ROTATION$", "%.1f" % event_options.get('rotation', 30.0))
fault_lines = fault_lines.replace("$AMPLITUDE$", "%.1f" % event_options.get('amplitude', 2000.0))
fault_lines = fault_lines.replace("$RADIUS$", "%.1f" % event_options.get('radius', 1000.0))
fault_lines = fault_lines.replace("$XAXIS$", "%.1f" % event_options.get('xaxis', 2000.0))
fault_lines = fault_lines.replace("$YAXIS$", "%.1f" % event_options.get('yaxis', 2000.0))
fault_lines = fault_lines.replace("$ZAXIS$", "%.1f" % event_options.get('zaxis', 2000.0))
# $GEOMETRY$ Translation
# now split lines and add as list entries to event lines
# event lines are defined in list:
# split lines and add to event lines list:
for layer_line in fault_lines.split("\n"):
tmp_lines.append(layer_line)
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
return ev
def _create_fold(self, event_options):
"""Create a fold event
**Arguments**:
- *event_options* = list : list of required and optional settings for event;
Options are:
'name' = string : name of fault event
'pos' = (x,y,z) : position of reference point (floats)
.. note:: for convenience, it is possible to assign 'top' to z
for position at "surface"
'amplitude' = float : amplitude of fold
'wavelength' = float : wavelength of fold
            'dip_dir' = float : dip direction of the fold plane (default: 90)
            'dip' = float : dip of the fold plane (default: 90)
"""
        ev = events.Fold()
tmp_lines = [""]
fault_lines = _Templates.fold
# substitute text with according values
fault_lines = fault_lines.replace("$NAME$", event_options['name'])
fault_lines = fault_lines.replace("$POS_X$", "%.1f" % event_options['pos'][0])
fault_lines = fault_lines.replace("$POS_Y$", "%.1f" % event_options['pos'][1])
        if event_options['pos'][2] == 'top':
# recalculate z-value to be at top of model
z = self.zmax
fault_lines = fault_lines.replace("$POS_Z$", "%.1f" % z)
else:
fault_lines = fault_lines.replace("$POS_Z$", "%.1f" % event_options['pos'][2])
fault_lines = fault_lines.replace("$WAVELENGTH$", "%.1f" % event_options['wavelength'])
fault_lines = fault_lines.replace("$AMPLITUDE$", "%.1f" % event_options['amplitude'])
# fault_lines = fault_lines.replace("$SLIP$", "%.1f" % event_options['slip'])
fault_lines = fault_lines.replace("$DIP_DIR$", "%.1f" % event_options.get('dip_dir', 90.0))
fault_lines = fault_lines.replace("$DIP$", "%.1f" % event_options.get('dip', 90.0))
# now split lines and add as list entries to event lines
# event lines are defined in list:
# split lines and add to event lines list:
for layer_line in fault_lines.split("\n"):
tmp_lines.append(layer_line)
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
return ev
# AK 2014-10
def _create_tilt(self, event_options):
"""Create a tilt event
**Arguments**:
- *event_options* = list : list of required and optional settings for event;
Options are:
'name' = string : name of tilt event
'pos' = (x,y,z) : position of reference point (floats)
.. note:: for convenience, it is possible to assign 'top' to z
for position at "surface"
'rotation' = [0,360] : dip?
'plunge_direction' = [0,360] : strike of plunge, measured from x axis
'plunge' = float : ?
"""
ev = events.Tilt()
tmp_lines = [""]
tilt_lines = _Templates.tilt
# substitute text with according values
tilt_lines = tilt_lines.replace("$NAME$", event_options['name'])
tilt_lines = tilt_lines.replace("$POS_X$", "%.1f" % event_options['pos'][0])
tilt_lines = tilt_lines.replace("$POS_Y$", "%.1f" % event_options['pos'][1])
        if event_options['pos'][2] == 'top':
# recalculate z-value to be at top of model
z = self.zmax
tilt_lines = tilt_lines.replace("$POS_Z$", "%.1f" % z)
else:
tilt_lines = tilt_lines.replace("$POS_Z$", "%.1f" % event_options['pos'][2])
tilt_lines = tilt_lines.replace("$ROTATION$", "%.1f" % event_options['rotation'])
tilt_lines = tilt_lines.replace("$PLUNGE_DIRECTION$", "%.1f" % event_options['plunge_direction'])
tilt_lines = tilt_lines.replace("$PLUNGE$", "%.1f" % event_options['plunge'])
# now split lines and add as list entries to event lines
# event lines are defined in list:
# split lines and add to event lines list:
for tilt_line in tilt_lines.split("\n"):
tmp_lines.append(tilt_line)
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
return ev
# AK 2014-10
def _create_unconformity(self, event_options):
"""Create a unconformity event
**Arguments**:
- *event_options* = list : list of required and optional settings for event;
Options are:
'name' = string : name of unconformity event
'pos' = (x,y,z) : position of reference point (floats)
.. note:: for convenience, it is possible to assign 'top' to z
for position at "surface"
            'dip_direction' = [0,360] : dip direction of the unconformity plane
            'dip' = [0,90] : dip of the unconformity plane
            'num_layers' = int : number of layers above the unconformity
            'layer_names' = list of strings : names of the layers above the unconformity
            'layer_thickness' = list of floats : thicknesses of the layers above the unconformity
"""
ev = events.Unconformity()
tmp_lines = [""]
unconformity_lines = _Templates.unconformity
# substitute text with according values
unconformity_lines = unconformity_lines.replace("$NAME$", event_options['name'])
unconformity_lines = unconformity_lines.replace("$POS_X$", "%.1f" % event_options['pos'][0])
unconformity_lines = unconformity_lines.replace("$POS_Y$", "%.1f" % event_options['pos'][1])
        if event_options['pos'][2] == 'top':
# recalculate z-value to be at top of model
z = self.zmax
unconformity_lines = unconformity_lines.replace("$POS_Z$", "%.1f" % z)
else:
unconformity_lines = unconformity_lines.replace("$POS_Z$", "%.1f" % event_options['pos'][2])
unconformity_lines = unconformity_lines.replace("$DIP_DIRECTION$", "%.1f" % event_options['dip_direction'])
unconformity_lines = unconformity_lines.replace("$DIP$", "%.1f" % event_options['dip'])
# split lines and add to event lines list:
for unconformity_line in unconformity_lines.split("\n"):
tmp_lines.append(unconformity_line)
# unconformity has a stratigraphy block
tmp_lines.append("\tNum Layers\t= %d" % event_options['num_layers'])
for i in range(event_options['num_layers']):
"""Add stratigraphy layers"""
layer_name = event_options['layer_names'][i]
cum_thickness = np.cumsum(event_options['layer_thickness'])
layer_lines = _Templates().strati_layer
# now replace required variables
layer_lines = layer_lines.replace("$NAME$", layer_name)
layer_lines = layer_lines.replace("$HEIGHT$", "%.1f" % cum_thickness[i])
layer_lines = layer_lines.replace(" ", "\t")
# split lines and add to event lines list:
for layer_line in layer_lines.split("\n"):
tmp_lines.append(layer_line)
# append event name
tmp_lines.append("""\tName\t= %s""" % event_options.get('name', 'Unconf'))
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
return ev
def set_event_params(self, params_dict):
"""set multiple event parameters according to settings in params_dict
**Arguments**:
- *params_dict* = dictionary : entries to set (multiple) parameters
"""
for key, sub_dict in list(params_dict.items()):
for sub_key, val in list(sub_dict.items()):
self.events[key].properties[sub_key] = val
def change_event_params(self, changes_dict):
"""Change multiple event parameters according to settings in changes_dict
**Arguments**:
- *changes_dict* = dictionary : entries define relative changes for (multiple) parameters
Per default, the values in the dictionary are added to the event parameters.
"""
# print changes_dict
for key, sub_dict in list(changes_dict.items()): # loop through events (key)
for sub_key, val in list(sub_dict.items()): # loop through parameters being changed (sub_key)
if isinstance(sub_key, int): # in this case, it is the layer id of a stratigraphic layer!
self.events[key].layers[sub_key].properties[val['property']] += val['val']
else:
self.events[key].properties[sub_key] += val
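    # Sketch of the changes_dict structure (event ids, property names and values are
    # illustrative; property keys must match the event's parsed properties):
    #   his.change_event_params({2: {'Slip': 100.}})  # add 100 to the 'Slip' of event 2
    #   his.change_event_params({1: {0: {'property': 'Height', 'val': 50.}}})  # shift layer 0 of event 1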
def get_event_params(self, event_number):
'''
Returns the parameter dictionary for a given event.
**Arguments**:
        - *event_number* = the event to get the parameters for (integer)
**Returns**
- Returns the parameter dictionary for the requested event
'''
return self.events[event_number].properties
def get_event_param(self, event_number, name):
"""
Returns the value of a given parameter for a given event.
**Arguments**:
- *event_number* = the event to get a parameter for (integer)
        - *name* = the name of the parameter to retrieve (string)
**Returns**
        - Returns the value of the requested parameter, or None if it does not
        exist.
"""
try:
ev = self.events[event_number].properties
return ev[name]
except KeyError:
return None # property does not exist
def write_history(self, filename):
"""Write history to new file
**Arguments**:
- *filename* = string : filename of new history file
.. hint:: Just love it how easy it is to 'write history' with Noddy ;-)
"""
# before saving: update all event properties (in case changes were made)
self.update_all_event_properties()
# first: create header
if not hasattr(self, "filename"):
self.filename = filename
self._create_header()
# initialise history lines
history_lines = []
# add header
for line in self.header_lines.split("\n"):
history_lines.append(line + "\n")
# add number of events
history_lines.append("No of Events\t= %d\n" % len(self.events))
# add events
for event_id in sorted(self.events.keys()):
for line in self.events[event_id].event_lines:
history_lines.append(line)
# add footer: from original footer or from template (if new file):
if not hasattr(self, "footer_lines"):
self.create_footer_from_template()
# add footer
for line in self.footer_lines:
history_lines.append(line)
f = open(filename, 'w')
for i, line in enumerate(history_lines):
# add empty line before "BlockOptions", if not there:
if ('BlockOptions' in line) and (history_lines[i - 1] != "\n"):
f.write("\n")
# write line
f.write(line)
f.close()
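    # Sketch: persist the (possibly modified) model and recompute it (file names are
    # illustrative; pynoddy.compute_model needs a working Noddy binary):
    #   import pynoddy
    #   his.write_history("my_model.his")
    #   pynoddy.compute_model("my_model.his", "my_model_out")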
# ===============================================================================
# End of NoddyHistory
# ===============================================================================
# ===============================================================================
# Two extra PyNoddy functions for creating history files based on fault traces
# ===============================================================================
def setUpFaultRepresentation(Data, SlipParam=0.04, xy_origin=[0, 0, 0],
RefineFault=True, RefineDistance=350,
interpType='linear'):
"""
    This is a function that takes a csv file with fault vertices and manipulates
    the information so it can be easily input into PyNoddy
Parameters
----------
Data : A pandas table with the vertices of the faults.
        This file is created by drawing fault lines in QGIS with a dip direction and id,
        using the "Extract Vertices" tool to extract the vertices,
        then adding the x and y information using the attribute calculator in the table,
        and then exporting to csv.
Needs to contain four columns: id,DipDirecti,X,Y.
id: an identifier for each fault (to which fault does the vertex belong)
DipDirecti: dip direction: East, West, SS (strike slip)
X,Y: the x and y of the fault vertices
    SlipParam : ratio of fault slip to fault length, optional
        The default is 0.04.
    RefineFault: whether to resample the fault trace with additional vertices
    RefineDistance: approximate spacing (in map units) between resampled fault vertices
interpType: The type of interpolation used when refining the fault trace.
choose from ‘linear’, ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘previous’, ‘next’
https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html
Returns
-------
parametersForGeneratingFaults: a dictionary with lists of Noddy-ready fault parameters.
"""
import pandas as pd
from sklearn.decomposition import PCA
from scipy import interpolate
#################################
# 1. initialize parameters
#################################
# the x, y, z centers of the fault
XList, YList, ZList = [], [], []
# the x, y, trace points of the fault, and the number of points per fault trace
PtXList, PtYList = [], []
nFaultPointsList = []
# for elliptic faults, the radii of the fault
XAxisList, YAxisList, ZAxisList = [], [], []
# the dip, dip direction, and slip
DipList, DipDirectionList, SlipList = [], [], []
# the amplitude, pitch, and profile pitch
AmplitudeList = []
PitchList = []
ProfilePitchList = []
# clean the input data such that it is relative to the point of origin of the model
Data['X'] = Data['X'] - xy_origin[0]
Data['Y'] = Data['Y'] - xy_origin[1]
# Get the number of input faults
Faults = pd.unique(Data['id'])
nFaults = len(Faults)
#################################
# 2. Calculate initialized parameters for each input fault
#################################
for i in range(nFaults):
# select the data points for each fault one at a time
filterV = Data['id'] == Faults[i]
# get the xy points of the fault
xy = Data.loc[filterV, ['X', 'Y']].values
# get the dip direction information (you only need one value)
EastWest = Data.loc[filterV, ['DipDirecti']].values[0, 0]
# Perform a pca on the vertices in order to get the faults aligned on
# a major and minor axis
pca = PCA(2)
pca.fit(xy)
if pca.components_[0, 0] > 0:
pca.components_[0, :] = pca.components_[0, :] * -1
if pca.components_[1, 1] > 0:
pca.components_[1, :] = pca.components_[1, :] * -1
xypca = pca.transform(xy)
# Calculate the dip direction
vectorPCA1 = pca.components_[0, :]
vectorNorth = [0, 1]
if vectorPCA1[0] < 0:
vectorPCA1 = vectorPCA1 * -1
angle = np.math.atan2(np.linalg.det([vectorPCA1, vectorNorth]), np.dot(vectorPCA1, vectorNorth))
angle = np.degrees(angle)
dipdirection = angle + 90
if dipdirection < 0:
dipdirection = dipdirection + 360
# Calculate the fault length
lengthFault = np.max(xypca[:, 0]) - np.min(xypca[:, 0])
# Get the fault center x and y
means = pca.inverse_transform(
[(np.max(xypca[:, 0]) + np.min(xypca[:, 0])) / 2, (np.max(xypca[:, 1]) + np.min(xypca[:, 1])) / 2])
meanX = means[0]
meanY = means[1]
# You need to normalize the input fault data to be between 0-628 for x
# and between -100 to 100 in the y direction.
targetXmin = 0
targetXmax = 628
targetYmin = -100
targetYmax = 100
newRangeX = targetXmax - targetXmin
newRangeY = targetYmax - targetYmin
oldRangeX = (np.max(xypca[:, 0]) - np.min(xypca[:, 0]))
oldRangeY = (np.max(xypca[:, 1]) - np.min(xypca[:, 1]))
xypca[:, 0] = ((xypca[:, 0] - np.min(xypca[:, 0])) / oldRangeX) * newRangeX
        # If the fault is straight, it does not need to be rescaled in the y direction
if oldRangeY < 0.0001:
pass
else:
xypca[:, 1] = ((xypca[:, 1] - np.min(xypca[:, 1])) / oldRangeY) * newRangeY + targetYmin
# The trace needs to be flipped sometimes depending on the dipping direction
if EastWest == 'East':
if dipdirection < 180:
dip = 70
else:
dipdirection = dipdirection - 180
xypca[:, 1] = -1 * xypca[:, 1]
xypca[:, 0] = -1 * xypca[:, 0] + newRangeX
dip = 70
ProfilePitch = 0
Pitch = 90
elif EastWest == 'SS':
if dipdirection < 180:
dip = 80
else:
dipdirection = dipdirection - 180
xypca[:, 1] = -1 * xypca[:, 1]
dip = 80
Pitch = 180
ProfilePitch = 90
else:
if dipdirection > 180:
dip = 70
else:
dipdirection = dipdirection + 180
xypca[:, 1] = -1 * xypca[:, 1]
xypca[:, 0] = -1 * xypca[:, 0] + newRangeX
dip = 70
ProfilePitch = 0
Pitch = 90
# Just to be sure, I'm re-sorting the data by x.
# I'm not sure this is a necessary step.
# This can most definitely be done using numpy and not pandas
xypcapd = pd.DataFrame({'X': xypca[:, 0], 'Y': xypca[:, 1]})
        xypcapd = xypcapd.sort_values(['X', 'Y'], ascending=True)
xypca = xypcapd.values
traceXpts = xypca[:, 0]
traceYpts = xypca[:, 1]
# Refine the fault
maxPointsFault = 30
minPointsFault = 2
        if RefineFault:
            nPointsDivide = int(
                np.max([np.ceil(np.min([lengthFault / RefineDistance, maxPointsFault])), minPointsFault]))
            f = interpolate.interp1d(traceXpts.copy(), traceYpts.copy(), kind=interpType)
traceXpts = np.linspace(0, 628, nPointsDivide)
traceYpts = f(traceXpts)
# Add the calculated fault information to the initialized list.
PtXList.append(traceXpts)
PtYList.append(traceYpts)
XList.append(meanX)
YList.append(meanY)
ZList.append(4000)
XAxisList.append(lengthFault / 2)
ZAxisList.append(lengthFault / 2)
YAxisList.append(lengthFault / 2)
DipDirectionList.append(dipdirection)
DipList.append(dip)
SlipList.append(lengthFault * SlipParam)
AmplitudeList.append(oldRangeY / 2)
ProfilePitchList.append(ProfilePitch)
PitchList.append(Pitch)
nFaultPointsList.append(len(traceXpts))
# Return all of the fault information in a list structure
parametersForGeneratingFaults = {}
parametersForGeneratingFaults['nFaults'] = nFaults
parametersForGeneratingFaults['nFaultPoints'] = nFaultPointsList
parametersForGeneratingFaults['PtX'] = PtXList
parametersForGeneratingFaults['PtY'] = PtYList
parametersForGeneratingFaults['X'] = XList
parametersForGeneratingFaults['Y'] = YList
parametersForGeneratingFaults['Z'] = ZList
parametersForGeneratingFaults['XAxis'] = XAxisList
parametersForGeneratingFaults['YAxis'] = YAxisList
parametersForGeneratingFaults['ZAxis'] = ZAxisList
parametersForGeneratingFaults['Dip'] = DipList
parametersForGeneratingFaults['Dip Direction'] = DipDirectionList
parametersForGeneratingFaults['Slip'] = SlipList
parametersForGeneratingFaults['Amplitude'] = AmplitudeList
parametersForGeneratingFaults['Profile Pitch'] = ProfilePitchList
parametersForGeneratingFaults['Pitch'] = PitchList
return parametersForGeneratingFaults
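# Usage sketch for the fault set-up helper (file name, origin and values are illustrative;
# the csv must contain the id, DipDirecti, X and Y columns described above):
#   import pandas as pd
#   fault_table = pd.read_csv("fault_vertices.csv")
#   fault_params = setUpFaultRepresentation(fault_table, SlipParam=0.04,
#                                           xy_origin=[500000, 7400000, 0])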
def createPyNoddyHistoryFile(noddyFormattedFaultData, StratDict,
filename='faultmodel.his', joinType='LINES',
cubesize=150, origin=[0, 0, 4000], extent=[9000, 9400, 4000]):
"""
    This is a function that creates a PyNoddy history file from a dictionary
    with information regarding faults and a dictionary with information regarding
    the stratigraphy.
Parameters
----------
noddyFormattedFaultData: the output from setUpFaultRepresentation
StratDict: a dictionary with information regarding the stratigraphy, the bottom layer first.
for example:
StratDict = {}
StratDict['Heights'] = [2000, 2500, 3000, 3700]
StratDict['Names'] = ['Intrusive', 'Felsic', 'Mafic','Sed']
StratDict['Density'] = [2.65, 2.5, 2.4, 2.3]
StratDict['MagSus'] = [0.0015, 0.0012, 0.0018, 0.001]
filename: a name for the history file
    joinType: in Noddy, fault traces can be interpolated via LINES, CURVES, or SQUARE
cubesize: the size of the cube that will determine the resolution
origin: the minimum x and y values of the model and the top z point of the model
extent: the extent of the model in the x, y, and z directions
Returns
-------
    Nothing. The history file is written to *filename*.
"""
import pynoddy
nFaults = noddyFormattedFaultData['nFaults']
nEvents = nFaults + 1
nLayers = len(StratDict['Names'])
# Open the history file and write out the top header
file1 = open(filename, "w")
headerTxt = pynoddy.history._Templates().header
file1.write(headerTxt + '\n')
# Write out the number of events
numEventsText = "No of Events\t= %d\n" % nEvents
file1.write(numEventsText)
# Make the stratigraphy event
# By copying a template and then replacing the key words identified by $key$
EventTitle = 'Event #1 = STRATIGRAPHY'
file1.write(EventTitle + '\n')
SubTitle = " Num Layers = %d" % nLayers
file1.write(SubTitle + '\n')
for i in range(nLayers):
stratTxt = pynoddy.history._Templates().strati_layerExpanded
stratTxt = stratTxt.replace("$NAME$", StratDict['Names'][i])
stratTxt = stratTxt.replace("$RED$", str(np.random.randint(0, 255)))
stratTxt = stratTxt.replace("$GREEN$", str(np.random.randint(0, 255)))
stratTxt = stratTxt.replace("$BLUE$", str(np.random.randint(0, 255)))
stratTxt = stratTxt.replace("$Height$", "{:.5f}".format(StratDict['Heights'][i]))
stratTxt = stratTxt.replace("$Density$", "{:.5f}".format(StratDict['Density'][i]))
stratTxt = stratTxt.replace("$MagSus$", "{:.5f}".format(StratDict['MagSus'][i]))
file1.write(stratTxt + '\n')
file1.write(" Name = Strat\n")
# Make an event for each fault
FaultProperties = ['X', 'Y', 'Z', 'Dip Direction',
'Dip', 'Slip', 'Amplitude', 'XAxis', 'YAxis',
'ZAxis', 'Profile Pitch', 'Pitch']
for i in range(nFaults):
nPoints = noddyFormattedFaultData['nFaultPoints'][i]
EventTitle = 'Event #%d = FAULT' % (i + 2)
file1.write(EventTitle + '\n')
# start
faultTxt = pynoddy.history._Templates().fault_start
for prop in FaultProperties:
faultTxt = faultTxt.replace("$" + prop + "$", "{:.5f}".format(noddyFormattedFaultData[prop][i]))
faultTxt = faultTxt.replace('$Join Type$', joinType)
file1.write(faultTxt + '\n')
# middle --> add the fault trace information
faultPointTxt = " Num Points = %d" % nPoints
file1.write(faultPointTxt + '\n')
for p in range(nPoints):
ptX = " Point X = " + "{:.5f}".format(noddyFormattedFaultData['PtX'][i][p])
file1.write(ptX + '\n')
ptY = " Point Y = " + "{:.5f}".format(noddyFormattedFaultData['PtY'][i][p])
file1.write(ptY + '\n')
# end
faultTxt = pynoddy.history._Templates().fault_end
faultTxt = faultTxt.replace("$NAME$", 'Fault' + str(i))
file1.write(faultTxt + '\n')
# replace the origin information
footerTxt = pynoddy.history._Templates().footer_expanded
footerTxt = footerTxt.replace('$origin_z$', str(origin[2]))
footerTxt = footerTxt.replace('$extent_x$', str(extent[0]))
footerTxt = footerTxt.replace('$extent_y$', str(extent[1]))
footerTxt = footerTxt.replace('$extent_z$', str(extent[2]))
footerTxt = footerTxt.replace('$cube_size$', str(cubesize))
file1.write(footerTxt)
file1.close()
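# Usage sketch combining both helpers (values are illustrative; fault_params is the
# output of setUpFaultRepresentation above, StratDict follows the structure documented
# in the docstring):
#   StratDict = {'Heights': [2000, 2500, 3000, 3700],
#                'Names': ['Intrusive', 'Felsic', 'Mafic', 'Sed'],
#                'Density': [2.65, 2.5, 2.4, 2.3],
#                'MagSus': [0.0015, 0.0012, 0.0018, 0.001]}
#   createPyNoddyHistoryFile(fault_params, StratDict, filename='faultmodel.his',
#                            cubesize=150, origin=[0, 0, 4000], extent=[9000, 9400, 4000])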
# ===============================================================================
# Templates for Noddy history file
# ===============================================================================
class _Templates:
header = """#Filename = simple_two_faults.his
#Date Saved = 24/3/2014 14:21:0
FileType = 111
Version = 7.03"""
strati_layer = """ Unit Name = $NAME$
Height = $HEIGHT$
Apply Alterations = ON
Density = $DENSITY$
Anisotropic Field = 0
MagSusX = 1.60e-003
MagSusY = 1.60e-003
MagSusZ = 1.60e-003
MagSus Dip = 9.00e+001
MagSus DipDir = 9.00e+001
MagSus Pitch = 0.00e+000
Remanent Magnetization = 0
Inclination = 30.00
Angle with the Magn. North = 30.00
Strength = 1.60e-003
Color Name = Color 92
Red = 0
Green = 153
Blue = 48 """
strati_layerExpanded = """ Unit Name = $NAME$
Height = $Height$
Apply Alterations = ON
Density = $Density$
Anisotropic Field = 0
MagSusX = $MagSus$
MagSusY = $MagSus$
MagSusZ = $MagSus$
MagSus Dip = 9.00e+001
MagSus DipDir = 9.00e+001
MagSus Pitch = 0.00e+000
Remanent Magnetization = 0
Inclination = 30.00
Angle with the Magn. North = 30.00
Strength = 1.60e-003
Color Name = Color 92
Red = $RED$
Green = $GREEN$
Blue = $BLUE$ """
fault_start = """ Geometry = Curved
Movement = Hanging Wall
X = $X$
Y = $Y$
Z = $Z$
Dip Direction = $Dip Direction$
Dip = $Dip$
Pitch = $Pitch$
Slip = $Slip$
Rotation = 30
Amplitude = $Amplitude$
Radius = 1000
XAxis = $XAxis$
YAxis = $YAxis$
ZAxis = $ZAxis$
Cyl Index = 0.00
Profile Pitch = $Profile Pitch$
Color Name = Custom Colour 8
Red = 0
Green = 0
Blue = 254
Fourier Series
Term A 0 = 0.00
Term B 0 = 0.00
Term A 1 = 0.00
Term B 1 = 1.00
Term A 2 = 0.00
Term B 2 = 0.00
Term A 3 = 0.00
Term B 3 = 0.00
Term A 4 = 0.00
Term B 4 = 0.00
Term A 5 = 0.00
Term B 5 = 0.00
Term A 6 = 0.00
Term B 6 = 0.00
Term A 7 = 0.00
Term B 7 = 0.00
Term A 8 = 0.00
Term B 8 = 0.00
Term A 9 = 0.00
Term B 9 = 0.00
Term A 10 = 0.00
Term B 10 = 0.00
Name = Fault Plane
Type = 1
Join Type = $Join Type$
Graph Length = 200.000000
Min X = 0.000000
Max X = 6.280000
Min Y Scale = -1.000000
Max Y Scale = 1.000000
Scale Origin = 0.000000
Min Y Replace = -1.000000
Max Y Replace = 1.000000"""
fault_end = """ Alteration Type = NONE
Num Profiles = 12
Name = Density
Type = 2
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = 0.000000
Max Y Scale = 4.000000
Scale Origin = 1.000000
Min Y Replace = 0.000000
Max Y Replace = 10.000000
Num Points = 2
Point X = 0
Point Y = -50
Point X = 628
Point Y = -50
Name = Anisotropy
Type = 3
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -10.000000
Max Y Scale = 10.000000
Scale Origin = 0.000000
Min Y Replace = -10.000000
Max Y Replace = 10.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - X Axis (Sus)
Type = 4
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = 2.000000
Max Y Replace = 8.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Y Axis (Sus)
Type = 5
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = 2.000000
Max Y Replace = 8.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Z Axis (Sus)
Type = 6
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = 2.000000
Max Y Replace = 8.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Dip (Sus)
Type = 7
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -180.000000
Max Y Scale = 180.000000
Scale Origin = 1.000000
Min Y Replace = -180.000000
Max Y Replace = 180.000000
Num Points = 2
Point X = 0
Point Y = 1
Point X = 628
Point Y = 1
Name = - Dip Dir (Sus)
Type = 8
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Pitch (Sus)
Type = 9
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = Remanence
Type = 10
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -10.000000
Max Y Scale = 10.000000
Scale Origin = 0.000000
Min Y Replace = -10.000000
Max Y Replace = 10.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Declination (Rem)
Type = 11
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Inclination (Rem)
Type = 12
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Intensity (Rem)
Type = 13
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = -5.000000
Max Y Replace = 5.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Surface Type = FLAT_SURFACE
Surface Filename =
Surface Directory = \\psf\Home
Surface XDim = 0.000000
Surface YDim = 0.000000
Surface ZDim = 0.000000
Name = $NAME$"""
fault = """ Geometry = $GEOMETRY$
Movement = $MOVEMENT$
X = $POS_X$
Y = $POS_Y$
Z = $POS_Z$
Dip Direction = $DIP_DIR$
Dip = $DIP$
Pitch = 90.00
Slip = $SLIP$
Rotation = $ROTATION$
Amplitude = $AMPLITUDE$
Radius = $RADIUS$
XAxis = $XAXIS$
YAxis = $YAXIS$
ZAxis = $ZAXIS$
Cyl Index = 0.00
Profile Pitch = 90.00
Color Name = Custom Colour 8
Red = 0
Green = 0
Blue = 254
Fourier Series
Term A 0 = 0.00
Term B 0 = 0.00
Term A 1 = 0.00
Term B 1 = 1.00
Term A 2 = 0.00
Term B 2 = 0.00
Term A 3 = 0.00
Term B 3 = 0.00
Term A 4 = 0.00
Term B 4 = 0.00
Term A 5 = 0.00
Term B 5 = 0.00
Term A 6 = 0.00
Term B 6 = 0.00
Term A 7 = 0.00
Term B 7 = 0.00
Term A 8 = 0.00
Term B 8 = 0.00
Term A 9 = 0.00
Term B 9 = 0.00
Term A 10 = 0.00
Term B 10 = 0.00
Name = Fault Plane
Type = 1
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 6.280000
Min Y Scale = -1.000000
Max Y Scale = 1.000000
Scale Origin = 0.000000
Min Y Replace = -1.000000
Max Y Replace = 1.000000
Num Points = 21
Point X = 0
Point Y = 0
Point X = 31
Point Y = 30
Point X = 62
Point Y = 58
Point X = 94
Point Y = 80
Point X = 125
Point Y = 94
Point X = 157
Point Y = 99
Point X = 188
Point Y = 95
Point X = 219
Point Y = 81
Point X = 251
Point Y = 58
Point X = 282
Point Y = 31
Point X = 314
Point Y = 0
Point X = 345
Point Y = -31
Point X = 376
Point Y = -59
Point X = 408
Point Y = -81
Point X = 439
Point Y = -95
Point X = 471
Point Y = -100
Point X = 502
Point Y = -96
Point X = 533
Point Y = -82
Point X = 565
Point Y = -59
Point X = 596
Point Y = -32
Point X = 628
Point Y = -1
Alteration Type = NONE
Num Profiles = 12
Name = Density
Type = 2
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = 0.000000
Max Y Scale = 4.000000
Scale Origin = 1.000000
Min Y Replace = 0.000000
Max Y Replace = 10.000000
Num Points = 2
Point X = 0
Point Y = -50
Point X = 628
Point Y = -50
Name = Anisotropy
Type = 3
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -10.000000
Max Y Scale = 10.000000
Scale Origin = 0.000000
Min Y Replace = -10.000000
Max Y Replace = 10.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - X Axis (Sus)
Type = 4
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = 2.000000
Max Y Replace = 8.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Y Axis (Sus)
Type = 5
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = 2.000000
Max Y Replace = 8.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Z Axis (Sus)
Type = 6
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = 2.000000
Max Y Replace = 8.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Dip (Sus)
Type = 7
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -180.000000
Max Y Scale = 180.000000
Scale Origin = 1.000000
Min Y Replace = -180.000000
Max Y Replace = 180.000000
Num Points = 2
Point X = 0
Point Y = 1
Point X = 628
Point Y = 1
Name = - Dip Dir (Sus)
Type = 8
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Pitch (Sus)
Type = 9
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = Remanence
Type = 10
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -10.000000
Max Y Scale = 10.000000
Scale Origin = 0.000000
Min Y Replace = -10.000000
Max Y Replace = 10.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Declination (Rem)
Type = 11
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Inclination (Rem)
Type = 12
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Intensity (Rem)
Type = 13
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = -5.000000
Max Y Replace = 5.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Surface Type = FLAT_SURFACE
Surface Filename =
Surface Directory = \\psf\Home
Surface XDim = 0.000000
Surface YDim = 0.000000
Surface ZDim = 0.000000
Name = $NAME$"""
fold = """ Type = Sine
Single Fold = FALSE
X = $POS_X$
Y = $POS_Y$
Z = $POS_Z$
Dip Direction = $DIP_DIR$
Dip = $DIP$
Pitch = 0.00
Wavelength = $WAVELENGTH$
Amplitude = $AMPLITUDE$
Cylindricity = 0.00
Fourier Series
Term A 0 = 0.00
Term B 0 = 0.00
Term A 1 = 0.00
Term B 1 = 1.00
Term A 2 = 0.00
Term B 2 = 0.00
Term A 3 = 0.00
Term B 3 = 0.00
Term A 4 = 0.00
Term B 4 = 0.00
Term A 5 = 0.00
Term B 5 = 0.00
Term A 6 = 0.00
Term B 6 = 0.00
Term A 7 = 0.00
Term B 7 = 0.00
Term A 8 = 0.00
Term B 8 = 0.00
Term A 9 = 0.00
Term B 9 = 0.00
Term A 10 = 0.00
Term B 10 = 0.00
Name = Fold Profile
Type = 1
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 6.280000
Min Y Scale = -1.000000
Max Y Scale = 1.000000
Scale Origin = 0.000000
Min Y Replace = -1.000000
Max Y Replace = 1.000000
Num Points = 21
Point X = 0
Point Y = 0
Point X = 31
Point Y = 30
Point X = 62
Point Y = 58
Point X = 94
Point Y = 80
Point X = 125
Point Y = 94
Point X = 157
Point Y = 99
Point X = 188
Point Y = 95
Point X = 219
Point Y = 81
Point X = 251
Point Y = 58
Point X = 282
Point Y = 31
Point X = 314
Point Y = 0
Point X = 345
Point Y = -31
Point X = 376
Point Y = -59
Point X = 408
Point Y = -81
Point X = 439
Point Y = -95
Point X = 471
Point Y = -100
Point X = 502
Point Y = -96
Point X = 533
Point Y = -82
Point X = 565
Point Y = -59
Point X = 596
Point Y = -32
Point X = 628
Point Y = -1
Name = $NAME$"""
# AK 2014-10
tilt = """X = $POS_X$
Y = $POS_Y$
Z = $POS_Z$
Rotation = $ROTATION$
Plunge Direction = $PLUNGE_DIRECTION$
Plunge = $PLUNGE$
Name = $NAME$"""
unconformity = """X = $POS_X$
Y = $POS_Y$
Z = $POS_Z$
Dip Direction = $DIP_DIRECTION$
Dip = $DIP$
Alteration Type = NONE
Num Profiles = 1
Name =
Type = 0
Join Type = LINES
Graph Length = 0.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = 0.000000
Max Y Scale = 0.000000
Scale Origin = 0.000000
Min Y Replace = 0.000000
Max Y Replace = 0.000000
Num Points = 0
Surface Type = FLAT_SURFACE
Surface Filename =
Surface Directory = /tmp_mnt/sci6/users/mark/Atlas/case
Surface XDim = 0.000000
Surface YDim = 0.000000
Surface ZDim = 0.000000"""
temp = """
Num Layers = 5
Unit Name = UC Base
Height = -32000
Apply Alterations = ON
Density = 3.50e+00
Anisotropic Field = 0
MagSusX = 1.50e-06
MagSusY = 1.60e-03
MagSusZ = 1.60e-03
MagSus Dip = 9.00e+01
MagSus DipDir = 9.00e+01
MagSus Pitch = 0.00e+00
Remanent Magnetization = 0
Inclination = 30.00
Angle with the Magn. North = 30.00
Strength = 1.60e-03
Color Name = Color 98
Red = 84
Green = 153
Blue = 0
Unit Name = UC Layer 1
Height = 5650
Apply Alterations = ON
Density = 3.50e+00
Anisotropic Field = 0
MagSusX = 1.50e-06
MagSusY = 1.60e-03
MagSusZ = 1.60e-03
MagSus Dip = 9.00e+01
MagSus DipDir = 9.00e+01
MagSus Pitch = 0.00e+00
Remanent Magnetization = 0
Inclination = 30.00
Angle with the Magn. North = 30.00
Strength = 1.60e-03
Color Name = Color 68
Red = 204
Green = 117
Blue = 0
Name = $NAME$"""
footer_expanded = """
#BlockOptions
Number of Views = 1
Current View = 0
NAME = Default
Origin X = 0.00
Origin Y = 0.00
Origin Z = $origin_z$
Length X = $extent_x$
Length Y = $extent_y$
Length Z = $extent_z$
Geology Cube Size = $cube_size$
Geophysics Cube Size = $cube_size$
#GeologyOptions
Scale = 10.00
SectionDec = 90.00
WellDepth = 5000.00
WellAngleZ = 0.00
BoreholeX = 0.00
BoreholeX = 0.00
BoreholeX = 5000.00
BoreholeDecl = 90.00
BoreholeDip = 0.00
BoreholeLength = 5000.00
SectionX = 0.00
SectionY = 0.00
SectionZ = 5000.00
SectionDecl = 90.00
SectionLength = 10000.00
SectionHeight = 5000.00
topofile = FALSE
Topo Filename =
Topo Directory = .
Topo Scale = 1.00
Topo Offset = 0.00
Topo First Contour = 100.00
Topo Contour Interval = 100.00
Chair Diagram = FALSE
Chair_X = 5000.00
Chair_Y = 3500.00
Chair_Z = 2500.00
#GeophysicsOptions
GPSRange = 1200
Declination = 0.00
Inclination = -67.00
Intensity = 63000.00
Field Type = FIXED
Field xPos = 0.00
Field yPos = 0.00
Field zPos = 5000.00
Inclination Ori = 0.00
Inclination Change = 0.00
Intensity Ori = 90.00
Intensity Change = 0.00
Declination Ori = 0.00
Declination Change = 0.00
Altitude = 80.00
Airborne= TRUE
Calculation Method = SPATIAL
Spectral Padding Type = RECLECTION_PADDING
Spectral Fence = 100
Spectral Percent = 100
Constant Boxing Depth = 0.00
Clever Boxing Ratio = 1.00
Deformable Remanence= FALSE
Deformable Anisotropy= TRUE
Vector Components= FALSE
Project Vectors= TRUE
Pad With Real Geology= FALSE
Draped Survey= FALSE
#3DOptions
Declination = 150.000000
Elevation = 30.000000
Scale = 1.000000
Offset X = 1.000000
Offset Y = 1.000000
Offset Z = 1.000000
Fill Type = 2
#ProjectOptions
Susceptibility Units = CGS
Geophysical Calculation = 2
Calculation Type = LOCAL_JOB
Length Scale = 0
Printing Scale = 1.000000
Image Scale = 10.000000
New Windows = FALSE
Background Red Component = 254
Background Green Component = 254
Background Blue Component = 254
Internet Address = 255.255.255.255
Account Name =
Noddy Path = ./noddy
Help Path = iexplore %h
Movie Frames Per Event = 3
Movie Play Speed = 10.00
Movie Type = 0
Gravity Clipping Type = RELATIVE_CLIPPING
Gravity Image Display Clip Min = 0.000000
Gravity Image Display Clip Max = 100.000000
Gravity Image Display Type = GREY
Gravity Image Display Num Contour = 25
Magnetics Clipping Type = RELATIVE_CLIPPING
Magnetics Image Display Clip Min = 0.000000
Magnetics Image Display Clip Max = 100.000000
Magnetics Image Display Type = GREY
Magnetics Image Display Num Contour = 25
False Easting = 0.000000
False Northing = 0.000000
#Window Positions
Num Windows = 16
Name = Block Diagram
X = 60
Y = 60
Width = 500
Height = 300
Name = Movie
X = 60
Y = 60
Width = -1
Height = -1
Name = Well Log
X = 60
Y = 60
Width = 400
Height = 430
Name = Section
X = 14
Y = 16
Width = 490
Height = -1
Name = Topography Map
X = 60
Y = 60
Width = 490
Height = 375
Name = 3D Topography Map
X = 60
Y = 60
Width = 490
Height = 375
Name = 3D Stratigraphy
X = 60
Y = 60
Width = 490
Height = 375
Name = Line Map
X = 60
Y = 60
Width = 490
Height = -1
Name = Profile - From Image
X = 60
Y = 60
Width = 490
Height = 600
Name = Sterographic Projections
X = 60
Y = 60
Width = 430
Height = 430
Name = Stratigraphic Column
X = 60
Y = 60
Width = 230
Height = 400
Name = Image
X = 30
Y = 30
Width = -1
Height = -1
Name = Contour
X = 30
Y = 30
Width = -1
Height = -1
Name = Toolbar
X = 10
Y = 0
Width = -1
Height = -1
Name = History
X = 229
Y = 160
Width = 762
Height = 898
Name = History
X = 229
Y = 160
Width = 762
Height = 898
#Icon Positions
Num Icons = 3
Row = 1
Column = 1
X Position = 1
Y Position = 1
Row = 1
Column = 2
X Position = 4
Y Position = 1
Row = 1
Column = 3
X Position = 7
Y Position = 1
Floating Menu Rows = 1
Floating Menu Cols = 24
End of Status Report"""
# everything below events
footer = """
#BlockOptions
Number of Views = 1
Current View = 0
NAME = Default
Origin X = 0.00
Origin Y = 0.00
Origin Z = 5000.00
Length X = 10000.00
Length Y = 7000.00
Length Z = 5000.00
Geology Cube Size = 50.00
Geophysics Cube Size = 50.00
#GeologyOptions
Scale = 10.00
SectionDec = 90.00
WellDepth = 5000.00
WellAngleZ = 0.00
BoreholeX = 0.00
BoreholeX = 0.00
BoreholeX = 5000.00
BoreholeDecl = 90.00
BoreholeDip = 0.00
BoreholeLength = 5000.00
SectionX = 0.00
SectionY = 0.00
SectionZ = 5000.00
SectionDecl = 90.00
SectionLength = 10000.00
SectionHeight = 5000.00
topofile = FALSE
Topo Filename =
Topo Directory = .
Topo Scale = 1.00
Topo Offset = 0.00
Topo First Contour = 100.00
Topo Contour Interval = 100.00
Chair Diagram = FALSE
Chair_X = 5000.00
Chair_Y = 3500.00
Chair_Z = 2500.00
#GeophysicsOptions
GPSRange = 0
Declination = 0.00
Inclination = -67.00
Intensity = 63000.00
Field Type = FIXED
Field xPos = 0.00
Field yPos = 0.00
Field zPos = 5000.00
Inclination Ori = 0.00
Inclination Change = 0.00
Intensity Ori = 90.00
Intensity Change = 0.00
Declination Ori = 0.00
Declination Change = 0.00
Altitude = 80.00
Airborne= FALSE
Calculation Method = SPATIAL
Spectral Padding Type = RECLECTION_PADDING
Spectral Fence = 100
Spectral Percent = 100
Constant Boxing Depth = 0.00
Clever Boxing Ratio = 1.00
Deformable Remanence= FALSE
Deformable Anisotropy= TRUE
Vector Components= FALSE
Project Vectors= TRUE
Pad With Real Geology= FALSE
Draped Survey= FALSE
#3DOptions
Declination = 150.000000
Elevation = 30.000000
Scale = 1.000000
Offset X = 1.000000
Offset Y = 1.000000
Offset Z = 1.000000
Fill Type = 2
#ProjectOptions
Susceptibility Units = CGS
Geophysical Calculation = 2
Calculation Type = LOCAL_JOB
Length Scale = 0
Printing Scale = 1.000000
Image Scale = 10.000000
New Windows = FALSE
Background Red Component = 254
Background Green Component = 254
Background Blue Component = 254
Internet Address = 255.255.255.255
Account Name =
Noddy Path = ./noddy
Help Path = iexplore %h
Movie Frames Per Event = 3
Movie Play Speed = 10.00
Movie Type = 0
Gravity Clipping Type = RELATIVE_CLIPPING
Gravity Image Display Clip Min = 0.000000
Gravity Image Display Clip Max = 100.000000
Gravity Image Display Type = GREY
Gravity Image Display Num Contour = 25
Magnetics Clipping Type = RELATIVE_CLIPPING
Magnetics Image Display Clip Min = 0.000000
Magnetics Image Display Clip Max = 100.000000
Magnetics Image Display Type = GREY
Magnetics Image Display Num Contour = 25
False Easting = 0.000000
False Northing = 0.000000
#Window Positions
Num Windows = 16
Name = Block Diagram
X = 60
Y = 60
Width = 500
Height = 300
Name = Movie
X = 60
Y = 60
Width = -1
Height = -1
Name = Well Log
X = 60
Y = 60
Width = 400
Height = 430
Name = Section
X = 14
Y = 16
Width = 490
Height = -1
Name = Topography Map
X = 60
Y = 60
Width = 490
Height = 375
Name = 3D Topography Map
X = 60
Y = 60
Width = 490
Height = 375
Name = 3D Stratigraphy
X = 60
Y = 60
Width = 490
Height = 375
Name = Line Map
X = 60
Y = 60
Width = 490
Height = -1
Name = Profile - From Image
X = 60
Y = 60
Width = 490
Height = 600
Name = Sterographic Projections
X = 60
Y = 60
Width = 430
Height = 430
Name = Stratigraphic Column
X = 60
Y = 60
Width = 230
Height = 400
Name = Image
X = 30
Y = 30
Width = -1
Height = -1
Name = Contour
X = 30
Y = 30
Width = -1
Height = -1
Name = Toolbar
X = 10
Y = 0
Width = -1
Height = -1
Name = History
X = 229
Y = 160
Width = 762
Height = 898
Name = History
X = 229
Y = 160
Width = 762
Height = 898
#Icon Positions
Num Icons = 3
Row = 1
Column = 1
X Position = 1
Y Position = 1
Row = 1
Column = 2
X Position = 4
Y Position = 1
Row = 1
Column = 3
X Position = 7
Y Position = 1
Floating Menu Rows = 1
Floating Menu Cols = 24
End of Status Report"""
if __name__ == '__main__':
# some testing and debugging:
import os
os.chdir(r'C:\Users\Sam\OneDrive\Documents\Masters\Models\Mt Painter')
H1 = NoddyHistory("mt_pa_simplified.his")
H1.swap_events(2, 3)
H1.write_history("test")
H2 = NoddyHistory("test")
H2.events[10].properties['Radius'] = 2000
H2.write_history("test2")
|
gpl-2.0
|
sandeepdsouza93/TensorFlow-15712
|
tensorflow/examples/learn/iris.py
|
25
|
1649
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
zhenv5/scikit-learn
|
sklearn/svm/tests/test_svm.py
|
70
|
31674
|
"""
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check the shape returned with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weight to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
        # check that the score is better when class_weight='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
    # predict_proba available, but calling it should still raise an error:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
|
bsd-3-clause
|
tarthy6/dozer-thesis
|
examples/old/concrete/interaction-histogram.py
|
3
|
1277
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# demonstration of the woo.post2d module (see its documentation for details)
#
import pylab # the matlab-like interface of matplotlib
pylab.ioff()
import numpy
import os.path
from math import atan, pi  # used below; imported explicitly so the script also runs outside woo's embedded shell
# run uniax.py to get this file
loadFile='/tmp/uniax-tension.woo.gz'
if not os.path.exists(loadFile): raise RuntimeError("Run uniax.py first so that %s is created"%loadFile)
O.load(loadFile)
# axis normal to the plane in which we do the histogram
axis=0 # x, i.e. plot the yz plane
ax1,ax2=(axis+1)%3,(axis+2)%3 ## get the other two indices, i.e. 1 and 2 in this case
angles,forces=[],[]
for i in O.interactions:
if not i.isReal: continue
norm=i.geom.normal
angle=atan(norm[ax2]/norm[ax1])
force=i.phys.normalForce.norm()
angles.append(angle)
forces.append(force)
# easier: plain histogram
#pylab.hist(angles,weights=forces,bins=20)
# polar histogram
pylab.figure()
## prepare data
values,bins=numpy.histogram(angles,weights=forces,bins=20)
## prepare polar plot
pylab.subplot(111,polar=True);
## plot bar chart, with the histogram data
### bins has one edge extra, remove it: [:-1]
pylab.bar(left=bins[:-1],height=values,width=.7*pi/20);
# predefined function
pylab.figure()
utils.plotDirections(noShow=True).savefig('/tmp/a.pdf')
pylab.show()
|
gpl-2.0
|
asaluja/compositional-semantics
|
compute_composed.py
|
1
|
15954
|
#!/usr/bin/python -tt
'''
File: compute_composed.py
Date: July 25, 2014
Description: this script returns the composed representations for
a list of word pairs, given parameters. Optionally, we can also
print out the pairwise similarities among the top N phrases, sorted by
frequency (i.e., order in which they are read in).
Update (August 22, 2014): converted this script into a class representing
composed representations for better modularity/code reuse.
This file can also be run by itself; it assumes the word vectors in the first
argument, the compositional model parameters as the second argument, and a
list of POS-tagged phrases for which representations need to be computed in
STDIN; if we are doing additive or multiplicative models, then we don't need
to POS-tag the phrases.
'''
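# Hedged usage sketch (the file names below are placeholders, not files shipped with
# this repo): the script is typically driven as
#   python compute_composed.py vectors.txt params.pkl < tagged_phrases.txt
# or programmatically, mirroring main() further down:
#   model = CompoModel('params.pkl', concat=False, normalize=True)
#   model.readVecFile('vectors.txt', vecType='word')
#   rep = model.computeComposedRep('red car', 'JJ NN')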
import sys, commands, string, cPickle, getopt, math
import pylab as plt
import matplotlib as mpl
import matplotlib.backends.backend_pdf  # makes mpl.backends.backend_pdf.PdfPages available in visualizeParameters
import numpy as np
class PhraseTree:
def __init__(self, wordVector, tag, left=None, right=None):
self.vector = wordVector
self.tag = tag
self.left = left
self.right = right
self.parent = None
if left is not None:
left.parent = self
if right is not None:
right.parent = self
class CompoModel:
def __init__(self, params_file, concat = False, normalize = True, headed = False, rightBranch = False, multModel = False, addModel = False):
self.wordVecs = {} #key is word, value is vector rep of word
self.contextVecs = {}
self.phraseVecs = {} #key is (phrase, pos_pair) tuple, value is vector rep of (phrase, pos_pair)
#if not multModel and not addModel:
self.parameters = cPickle.load(open(params_file, 'rb')) #dictionary; value is a tuple of parameters-intercept
self.concat = concat
self.headed = headed
self.normalize = normalize
self.dimensions = self.parameters["X X"][0].shape[0]
self.rightBranch = rightBranch
self.multModel = multModel
self.addModel = addModel
def readVecFile(self, filename, vecType = "word"):
fh = open(filename, 'r')
vecs = {}
for line in fh:
if len(line.strip().split()) > 2:
word = line.strip().split()[0]
rep = np.array([float(i) for i in line.strip().split()[1:]])
vecs[word] = np.divide(rep, np.linalg.norm(rep)) if self.normalize else rep
if self.normalize and np.linalg.norm(rep) == 0:
vecs[word] = np.zeros(len(rep))
if vecType == "word":
self.wordVecs = vecs
else:
self.contextVecs = vecs
def checkVocab(self, phrase):
words = phrase.split()
for word in words:
if word not in self.wordVecs:
return False
        return True
def checkTag(self, prev_tag, tag):
new_tag = "NN" if tag == "NN" and (prev_tag == "JJ" or prev_tag == "DT" or prev_tag == "NN") else "X"
return new_tag
def constructPhraseTree(self, phrase, pos_seq):
maxChildren = 2
numChildren = 0
root = None
prevNode = None
words = phrase.split()
pos_tags = pos_seq.split()
if self.rightBranch:
words.reverse()
pos_tags.reverse()
for idx, word in enumerate(words):
tag = pos_tags[idx]
node = PhraseTree(self.wordVecs[word], tag)
numChildren += 1
if numChildren == maxChildren: #create parent
parent_tag = self.checkTag(prevNode.tag, tag)
parent = PhraseTree(np.zeros(self.dimensions), parent_tag, prevNode, node) if not self.rightBranch else PhraseTree(np.zeros(self.dimensions), parent_tag, node, prevNode)
if root is None: #previously unassigned
root = parent
else:
new_root_tag = self.checkTag(root.tag, tag)
new_root = PhraseTree(np.zeros(self.dimensions), new_root_tag, root, parent) if not self.rightBranch else PhraseTree(np.zeros(self.dimensions), new_root_tag, parent, root)
root = new_root
numChildren = 0
prevNode = node
if prevNode.parent is None: #now, need to attach any unattached nodes
if root is None:
return prevNode
else:
new_root_tag = self.checkTag(root.tag, prevNode.tag) #root is None condition for unattached single nodes (unigram phrases)
new_root = PhraseTree(np.zeros(self.dimensions), new_root_tag, root, prevNode) if not self.rightBranch else PhraseTree(np.zeros(self.dimensions), new_root_tag, prevNode, root)
root = new_root
return root
def computePhraseTreeRep(self, phraseTree):
if phraseTree is None: return
if phraseTree.left is None and phraseTree.right is None: #leaf
return phraseTree.vector
self.computePhraseTreeRep(phraseTree.left)
self.computePhraseTreeRep(phraseTree.right)
wordVec1 = phraseTree.left.vector
wordVec2 = phraseTree.right.vector
pos_tags = [phraseTree.left.tag, phraseTree.right.tag]
if self.headed:
headIdx = self.computeHeadedRep(pos_tags)
if headIdx > -1: #if headIdx == -1, then we computeComposed anyway
phraseTree.vector = wordVec1 if headIdx == 0 else wordVec2
return phraseTree.vector
key = ' '.join(pos_tags)
key = "X X" if key not in self.parameters else key
parameter, intercept = self.parameters[key]
if self.concat:
            argument = np.concatenate((wordVec1, wordVec2), axis=0)  # the word vectors are 1-D, so concatenate along axis 0
result = np.dot(parameter, argument.transpose())
else:
result = np.tensordot(wordVec2, parameter, axes=[0,2])
result = np.dot(result, wordVec1)
result += intercept
if self.normalize:
result = np.divide(result, np.linalg.norm(result))
phraseTree.vector = result
return result
def computeHeadedRep(self, pos_words):
if "NN" in pos_words:
if "JJ" in pos_words or "DT" in pos_words or sum([element == "NN" for element in pos_words]) == len(pos_words):
return 1
elif "VV" in pos_words:
return 0
else:
return -1
else:
return -1
'This function assumes that checkVocab(phrase) returns true; that should be called before this'
def computeComposedRep(self, phrase, pos_seq):
if (phrase, pos_seq) in self.phraseVecs:
return self.phraseVecs[(phrase, pos_seq)]
else:
result = None
if self.addModel:
result = self.computeSimpleRep(phrase, "add")
elif self.multModel:
result = self.computeSimpleRep(phrase, "mult")
else:
phraseTree = self.constructPhraseTree(phrase, pos_seq)
result = self.computePhraseTreeRep(phraseTree)
self.phraseVecs[(phrase, pos_seq)] = result
return result
'For point-wise additive/multiplicative models'
def computeSimpleRep(self, phrase, operator):
if (phrase, operator) in self.phraseVecs:
return self.phraseVecs[(phrase, operator)]
else:
words = phrase.split()
result_dim = self.wordVecs[words[0]].shape
result = np.zeros(result_dim) if operator == "add" else np.ones(result_dim)
for word in words:
result = result + self.wordVecs[word] if operator == "add" else np.multiply(result, self.wordVecs[word])
if self.normalize:
result = np.divide(result, np.linalg.norm(result))
self.phraseVecs[(phrase, operator)] = result
return result
    def computePairwiseSimilarities(self, topN):
for phrase, pos_pair in self.phraseVecs:
phraseSims = []
print "Phrase '%s' top %d similarities: "%(phrase, topN)
for phrase2, pos_pair2 in self.phraseVecs.keys():
if phrase != phrase2:
phraseRep1 = self.phraseVecs[(phrase, pos_pair)]
phraseRep2 = self.phraseVecs[(phrase2, pos_pair2)]
phraseSim = np.divide(np.dot(phraseRep1, phraseRep2), np.linalg.norm(phraseRep1) * np.linalg.norm(phraseRep2))
phraseSims.append((phrase2, phraseSim))
phraseSims.sort(key = lambda x:x[1], reverse=True)
topNPhraseSims = phraseSims[:topN]
for phraseSim in topNPhraseSims:
print "%s: %.3f\t"%(phraseSim[0], phraseSim[1]),
print
def printVector(self, phrase, rep):
print "%s"%phrase,
for idx in xrange(0, len(rep)):
print " %.6f"%(rep[idx]),
print
def visualizeParameters(self, outFile_root, chartsPerRow, chartsPerCol):
chartsPerCell = chartsPerRow * chartsPerCol
numCharts = self.dimensions
num_subplots = int(math.ceil(float(numCharts) / chartsPerCell))
for pos_pair in self.parameters:
pos_file = '_'.join(pos_pair.split())
outFH = mpl.backends.backend_pdf.PdfPages(outFile_root + ".%s.pdf"%pos_file)
parameter, intercept = self.parameters[pos_pair]
print "POS Pair: %s"%pos_pair
if self.concat:
#left_mat = -parameter[:,:self.dimensions]
left_mat = parameter[:,:self.dimensions]
print "Left Mat max: %.3f; Min: %.3f"%(np.max(left_mat), np.min(left_mat))
left_mat[left_mat==0] = np.nan
#right_mat = -parameter[:,self.dimensions:]
right_mat = parameter[:,self.dimensions:]
print "Right Mat max: %.3f; Min: %.3f"%(np.max(right_mat), np.min(right_mat))
right_mat[right_mat==0] = np.nan
f, axes_tuples = plt.subplots(1, 1)
ax1 = axes_tuples
#cmap = plt.cm.get_cmap('RdBu')
cmap = plt.cm.get_cmap('binary')
heatmap = np.ma.array(left_mat, mask=np.isnan(left_mat))
cmap.set_bad('w', 1.)
ax1.pcolor(heatmap, cmap=cmap, alpha=0.8)
ax1.set_title('First Word')
plt.tight_layout()
outFH.savefig()
f, axes_tuples = plt.subplots(1,1)
ax1 = axes_tuples
#cmap = plt.cm.get_cmap('RdBu')
cmap = plt.cm.get_cmap('binary')
heatmap = np.ma.array(right_mat, mask=np.isnan(right_mat))
cmap.set_bad('w', 1.)
ax1.pcolor(heatmap, cmap=cmap, alpha=0.8)
ax1.set_title('Second Word')
plt.tight_layout()
outFH.savefig()
outFH.close()
else:
for sp in xrange(num_subplots):
chartNum = 0
coordinate = sp*chartsPerCell
f, axes_tuples = plt.subplots(chartsPerCol, chartsPerRow, sharey=True, sharex=True)
while chartNum < chartsPerCell:
chartX = chartNum / chartsPerRow #truncates to nearest integer
chartY = chartNum % chartsPerRow
ax1 = axes_tuples[chartX][chartY]
cmap = plt.cm.get_cmap('RdBu')
maxVal = 0
minVal = 0
if coordinate < numCharts:
param = parameter[coordinate, :, :]
maxVal = param[param!=0].max()
minVal = param[param!=0].min()
param[param==0] = np.nan
param = -param #negate values of parameters, since we want red to indicate high values
heatmap = np.ma.array(param, mask=np.isnan(param))
cmap.set_bad('w', 1.)
ax1.pcolor(heatmap, cmap=cmap, alpha=0.8)
else:
param = np.zeros((numCharts, numCharts))
ax1.pcolor(param, cmap=cmap, alpha=0.8)
ax1.set_title('Dim. %d; Max: %.3f; Min: %.3f'%(coordinate+1, maxVal, minVal))
ax1.set_xlim([0, numCharts])
ax1.set_ylim([0, numCharts])
ax1.set_ylabel('Left W')
ax1.set_xlabel('Right W')
chartNum += 1
coordinate += 1
plt.tight_layout()
outFH.savefig()
outFH.close()
def main():
(opts, args) = getopt.getopt(sys.argv[1:], 'acmp:')
topN = -1
additive = False
concat = False
multiplicative = False
for opt in opts:
if opt[0] == '-p':
topN = int(opt[1])
elif opt[0] == '-a':
additive = True
elif opt[0] == '-m':
multiplicative = True
elif opt[0] == '-c':
concat = True
if multiplicative and additive: #not possible
sys.stderr.write("Error: Cannot have both '-a' and '-m' (additive and multiplicative) flags on!\n")
sys.exit()
model = CompoModel(args[1], concat, True)
model.readVecFile(args[0])
numExamples = 0
numInVocab = 0
numValidPOS = 0
for line in sys.stdin:
elements = line.strip().split()
if len(elements) == 2:
numExamples += 1
words = [word_pos.split('_')[0] for word_pos in elements]
pos_tags = [word_pos.split('_')[1] for word_pos in elements]
phrase = ' '.join(words)
if model.checkVocab(phrase):
numInVocab += 1
rep = None
if multiplicative:
rep = model.computeSimpleRep(phrase, "multiply")
elif additive:
rep = model.computeSimpleRep(phrase, "add")
else: #to do: change this section to handle generic POS pairs
contains_noun = "NN" in pos_tags or "NNS" in pos_tags or "NNP" in pos_tags or "NNPS" in pos_tags
if contains_noun:
if "JJ" in pos_tags or "JJR" in pos_tags or "JJS" in pos_tags:
if pos_tags[1] == "JJ" or pos_tags[1] == "JJR" or pos_tags[1] == "JJS": #wrong ordering, invert the ordering
words.reverse()
pos_tags.reverse()
rep = model.computeComposedRep(phrase, "JJ NN")
numValidPOS += 1
else:
valid = True
for pos_tag in pos_tags:
valid = valid & (pos_tag == "NN" or pos_tag == "NNS" or pos_tag == "NNPS")
if valid:
rep = model.computeComposedRep(phrase, "NN NN")
numValidPOS += 1
                if topN < 0 and rep is not None:  # rep stays None for unhandled POS sequences
                    model.printVector(phrase, rep)
sys.stderr.write("Out of %d examples, %d are in the vocab, and %d of those have the correct POS sequence (if '-a' or '-m' flag on, then POS # doesn't matter)\n"%(numExamples, numInVocab, numValidPOS))
if topN > 0: #i.e., pairwise similarities need to be computed
model.computePairwiseSimilarities(topN)
if __name__ == "__main__":
main()
|
apache-2.0
|
xzh86/scikit-learn
|
examples/missing_values.py
|
233
|
3056
|
"""
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
|
bsd-3-clause
|
dragonaire/humdata
|
run_fts.py
|
1
|
6310
|
import os.path
import pandas as pd
from datetime import date
from distutils import dir_util
from resources import constants
from utils import data_utils
def loadDataByDimension(dimension):
"""
Given a dimension of funding data (e.g. clusters/donors/recipients), load the data for each country.
Return a dict of country code to pandas dataframe for the funding data along the given dimension.
"""
if dimension not in constants.FTS_SCHEMAS.keys():
raise Exception('Not a valid funding dimension for downloaded data from FTS: {}!'.format(dimension))
schema = constants.FTS_SCHEMAS[dimension]
data_dir = os.path.join(constants.LATEST_RAW_DATA_PATH, constants.FTS_DIR)
date_str = date.today().isoformat()
with open(os.path.join(data_dir, constants.FTS_DOWNLOAD_DATE_FILE), 'r') as f:
date_str = f.read().strip()
data = {}
for code, country in constants.COUNTRY_CODES.iteritems():
print country
file_name = '-'.join([constants.FTS_FILE_PREFIX, code, dimension, date_str])
file_path = os.path.join(data_dir, '{}.csv'.format(file_name))
df = pd.read_csv(file_path, encoding='utf-8')
data[country] = df
return data
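# Hedged usage sketch ('clusters' is one of the dimensions named in the docstring above;
# this only works once the FTS CSVs have been downloaded to the configured data dir):
#   funding_by_country = loadDataByDimension('clusters')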
def loadDataByCountryCode(country_code):
"""
Given a country, load the data for each funding dimension.
Return a dict of funding dimension to pandas dataframe for the funding data for the given country.
"""
if country_code not in constants.COUNTRY_CODES.keys():
if country_code not in constants.COUNTRY_CODES.values():
            raise Exception('Not a valid country code for downloaded data from FTS: {}!'.format(country_code))
else:
            # Convert country name to country code (dict keys/values align in Python 2)
            country_code = constants.COUNTRY_CODES.keys()[constants.COUNTRY_CODES.values().index(country_code)]
data_dir = os.path.join(constants.LATEST_RAW_DATA_PATH, constants.FTS_DIR)
date_str = date.today().isoformat()
with open(os.path.join(data_dir, constants.FTS_DOWNLOAD_DATE_FILE), 'r') as f:
date_str = f.read().strip()
data = {}
for dimension, schema in constants.FTS_SCHEMAS.iteritems():
file_name = '-'.join([constants.FTS_FILE_PREFIX, country_code, dimension, date_str])
file_path = os.path.join(data_dir, '{}.csv'.format(file_name))
df = pd.read_csv(file_path, encoding='utf-8')
data[dimension] = df
return data
def combineData(data, column):
"""
Combine given data across a particular column, where data is a dictionary from keys to dataframes,
and the given column corresponds to a column name for the keys of the data dict, e.g. 'Country' or 'Dimension'.
Returns a single dataframe that combines all the dataframes in the given data.
"""
combined_df = pd.DataFrame()
for key, df in data.iteritems():
df[column] = key
combined_df = combined_df.append(df)
return combined_df
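# Hedged usage sketch with hypothetical in-memory frames (column names are made up):
#   data = {'Chad': pd.DataFrame({'totalFunding': [1.0]}),
#           'Mali': pd.DataFrame({'totalFunding': [2.0]})}
#   combined = combineData(data, 'Country')  # adds a 'Country' column and stacks the rows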
def updateLatestDataDir(download_path, current_date_str):
"""
Copies all files from the given download_path into the latest data directory configured in
`resources/constants.py`. Appends to the run_dates.txt file with the current run date.
"""
if not download_path or not current_date_str:
print 'Could not copy latest data for this run to the latest data directory!'
return
dir_util.copy_tree(download_path, constants.LATEST_DERIVED_DATA_PATH)
with open(constants.LATEST_DERIVED_RUN_DATE_FILE, 'a') as run_file:
run_file.write('{}-fts\n'.format(current_date_str))
return
def createCurrentDateDir(parent_dir):
"""
Create a new directory with the current date (ISO format) under the given parent_dir.
Return whether it was successful, the full path for the new directory, and the current date string.
If the date directory already exists or is not successful, default to returning the parent_dir as the full path.
"""
# Create a new directory of the current date under the given parent_dir if it doesn't already exist
current_date_str = date.today().isoformat()
dir_path = os.path.join(parent_dir, current_date_str)
success = data_utils.safely_mkdir(dir_path)
if not success:
# TODO: handle this better
# Safely default to returning the parent_dir if we cannot create the dir_path
print 'Could not create a new directory for the current date [{}], defaulting to existing parent dir: {}'.format(current_date_str, parent_dir)
dir_path = parent_dir
else:
print 'Created new derived data dir: {}'.format(dir_path)
return success, dir_path, current_date_str
def saveDerivedData(data, dir_path):
"""
Save the derived data into a new dated directory under the given parent_dir (defaults to DERIVED_DATA_PATH configured in `resources/constants.py`).
Return whether any data saving was successful.
"""
# Save data to dated directory under the given parent_dir
success = False
for dimension, df in data.iteritems():
df_path = os.path.join(dir_path, 'fts-{}.csv'.format(dimension))
print 'Saving derived data for dimension [{}] to: {}'.format(dimension, df_path)
df.to_csv(df_path, index=False, encoding='utf-8')
success = True
return success
def run():
print 'Load and process downloaded data from FTS'
# Create current date directory
print 'Create current date directory as the download path...'
_, download_path, current_date_str = createCurrentDateDir(constants.DERIVED_DATA_PATH)
print 'Load data by dimension...'
data_by_dimension = {}
for dimension, schema in constants.FTS_SCHEMAS.iteritems():
data_for_dimension = loadDataByDimension(dimension)
print 'Combine data for dimension [{}] across all countries...'.format(dimension)
data_by_dimension[dimension] = combineData(data_for_dimension, constants.COUNTRY_COL)
print data_by_dimension[dimension]
success = saveDerivedData(data_by_dimension, download_path)
if success:
print 'Copy data from {} to {}...'.format(download_path, constants.LATEST_DERIVED_DATA_PATH)
updateLatestDataDir(download_path, current_date_str)
#dir_util.copy_tree(download_path, constants.EXAMPLE_DERIVED_DATA_PATH)
print 'Done!'
if __name__ == "__main__":
run()
|
mit
|
sdvillal/manysources
|
setup.py
|
1
|
1440
|
#!/usr/bin/env python2
# coding=utf-8
# Authors: Floriane Montanari <[email protected]>
# Santi Villalba <[email protected]>
# Licence: BSD 3 clause
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import manysources
setup(
name='manysources',
license='BSD 3 clause',
description='model<->example<->prediction interactions with a chemical twist',
long_description=open('README.rst').read().replace('|Build Status| |Coverage Status|', ''),
version=manysources.__version__,
url='https://github.com/sdvillal/whatami',
author='Floriane Montanari <[email protected]>, Santi Villalba <[email protected]>',
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'License :: OSI Approved',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Operating System :: Unix',
],
install_requires=[
'h5py',
'scipy',
'numpy',
'pandas',
'joblib',
'matplotlib',
'seaborn',
'scikit-learn',
'whatami',
'argh',
'rdkit',
'numba',
'cytoolz',
'tsne',
'networkx',
],
tests_require=['pytest'],
platforms=['Any'],
)
|
bsd-3-clause
|
mjgrav2001/scikit-learn
|
sklearn/cluster/tests/test_birch.py
|
342
|
5603
|
"""
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
    # n_clusters * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
    # Test that passing an AgglomerativeClustering instance as n_clusters
    # gives the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that the leaf subclusters have a radius no greater than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
|
bsd-3-clause
|
pratapvardhan/pandas
|
pandas/tests/extension/base/getitem.py
|
2
|
7975
|
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseGetitemTests(BaseExtensionTests):
"""Tests for ExtensionArray.__getitem__."""
def test_iloc_series(self, data):
ser = pd.Series(data)
result = ser.iloc[:4]
expected = pd.Series(data[:4])
self.assert_series_equal(result, expected)
result = ser.iloc[[0, 1, 2, 3]]
self.assert_series_equal(result, expected)
def test_iloc_frame(self, data):
df = pd.DataFrame({"A": data, 'B':
np.arange(len(data), dtype='int64')})
expected = pd.DataFrame({"A": data[:4]})
# slice -> frame
result = df.iloc[:4, [0]]
self.assert_frame_equal(result, expected)
# sequence -> frame
result = df.iloc[[0, 1, 2, 3], [0]]
self.assert_frame_equal(result, expected)
expected = pd.Series(data[:4], name='A')
# slice -> series
result = df.iloc[:4, 0]
self.assert_series_equal(result, expected)
# sequence -> series
        result = df.iloc[[0, 1, 2, 3], 0]
self.assert_series_equal(result, expected)
def test_loc_series(self, data):
ser = pd.Series(data)
result = ser.loc[:3]
expected = pd.Series(data[:4])
self.assert_series_equal(result, expected)
result = ser.loc[[0, 1, 2, 3]]
self.assert_series_equal(result, expected)
def test_loc_frame(self, data):
df = pd.DataFrame({"A": data,
'B': np.arange(len(data), dtype='int64')})
expected = pd.DataFrame({"A": data[:4]})
# slice -> frame
result = df.loc[:3, ['A']]
self.assert_frame_equal(result, expected)
# sequence -> frame
result = df.loc[[0, 1, 2, 3], ['A']]
self.assert_frame_equal(result, expected)
expected = pd.Series(data[:4], name='A')
# slice -> series
result = df.loc[:3, 'A']
self.assert_series_equal(result, expected)
# sequence -> series
        result = df.loc[[0, 1, 2, 3], 'A']
self.assert_series_equal(result, expected)
def test_getitem_scalar(self, data):
result = data[0]
assert isinstance(result, data.dtype.type)
result = pd.Series(data)[0]
assert isinstance(result, data.dtype.type)
def test_getitem_scalar_na(self, data_missing, na_cmp, na_value):
result = data_missing[0]
assert na_cmp(result, na_value)
def test_getitem_mask(self, data):
# Empty mask, raw array
mask = np.zeros(len(data), dtype=bool)
result = data[mask]
assert len(result) == 0
assert isinstance(result, type(data))
# Empty mask, in series
mask = np.zeros(len(data), dtype=bool)
result = pd.Series(data)[mask]
assert len(result) == 0
assert result.dtype == data.dtype
# non-empty mask, raw array
mask[0] = True
result = data[mask]
assert len(result) == 1
assert isinstance(result, type(data))
# non-empty mask, in series
result = pd.Series(data)[mask]
assert len(result) == 1
assert result.dtype == data.dtype
def test_getitem_slice(self, data):
# getitem[slice] should return an array
result = data[slice(0)] # empty
assert isinstance(result, type(data))
result = data[slice(1)] # scalar
assert isinstance(result, type(data))
def test_get(self, data):
# GH 20882
s = pd.Series(data, index=[2 * i for i in range(len(data))])
assert s.get(4) == s.iloc[2]
result = s.get([4, 6])
expected = s.iloc[[2, 3]]
self.assert_series_equal(result, expected)
result = s.get(slice(2))
expected = s.iloc[[0, 1]]
self.assert_series_equal(result, expected)
assert s.get(-1) is None
assert s.get(s.index.max() + 1) is None
s = pd.Series(data[:6], index=list('abcdef'))
assert s.get('c') == s.iloc[2]
result = s.get(slice('b', 'd'))
expected = s.iloc[[1, 2, 3]]
self.assert_series_equal(result, expected)
result = s.get('Z')
assert result is None
assert s.get(4) == s.iloc[4]
assert s.get(-1) == s.iloc[-1]
assert s.get(len(s)) is None
# GH 21257
s = pd.Series(data)
s2 = s[::2]
assert s2.get(1) is None
def test_take_sequence(self, data):
result = pd.Series(data)[[0, 1, 3]]
assert result.iloc[0] == data[0]
assert result.iloc[1] == data[1]
assert result.iloc[2] == data[3]
def test_take(self, data, na_value, na_cmp):
result = data.take([0, -1])
assert result.dtype == data.dtype
assert result[0] == data[0]
assert result[1] == data[-1]
result = data.take([0, -1], allow_fill=True, fill_value=na_value)
assert result[0] == data[0]
assert na_cmp(result[1], na_value)
with tm.assert_raises_regex(IndexError, "out of bounds"):
data.take([len(data) + 1])
def test_take_empty(self, data, na_value, na_cmp):
empty = data[:0]
result = empty.take([-1], allow_fill=True)
assert na_cmp(result[0], na_value)
with pytest.raises(IndexError):
empty.take([-1])
with tm.assert_raises_regex(IndexError, "cannot do a non-empty take"):
empty.take([0, 1])
def test_take_negative(self, data):
# https://github.com/pandas-dev/pandas/issues/20640
n = len(data)
result = data.take([0, -n, n - 1, -1])
expected = data.take([0, 0, n - 1, n - 1])
self.assert_extension_array_equal(result, expected)
def test_take_non_na_fill_value(self, data_missing):
fill_value = data_missing[1] # valid
na = data_missing[0]
array = data_missing._from_sequence([na, fill_value, na])
result = array.take([-1, 1], fill_value=fill_value, allow_fill=True)
expected = array.take([1, 1])
self.assert_extension_array_equal(result, expected)
def test_take_pandas_style_negative_raises(self, data, na_value):
with pytest.raises(ValueError):
data.take([0, -2], fill_value=na_value, allow_fill=True)
@pytest.mark.parametrize('allow_fill', [True, False])
def test_take_out_of_bounds_raises(self, data, allow_fill):
arr = data[:3]
with pytest.raises(IndexError):
arr.take(np.asarray([0, 3]), allow_fill=allow_fill)
def test_take_series(self, data):
s = pd.Series(data)
result = s.take([0, -1])
expected = pd.Series(
data._from_sequence([data[0], data[len(data) - 1]]),
index=[0, len(data) - 1])
self.assert_series_equal(result, expected)
def test_reindex(self, data, na_value):
s = pd.Series(data)
result = s.reindex([0, 1, 3])
expected = pd.Series(data.take([0, 1, 3]), index=[0, 1, 3])
self.assert_series_equal(result, expected)
n = len(data)
result = s.reindex([-1, 0, n])
expected = pd.Series(
data._from_sequence([na_value, data[0], na_value]),
index=[-1, 0, n])
self.assert_series_equal(result, expected)
result = s.reindex([n, n + 1])
expected = pd.Series(data._from_sequence([na_value, na_value]),
index=[n, n + 1])
self.assert_series_equal(result, expected)
def test_reindex_non_na_fill_value(self, data_missing):
valid = data_missing[1]
na = data_missing[0]
array = data_missing._from_sequence([na, valid])
ser = pd.Series(array)
result = ser.reindex([0, 1, 2], fill_value=valid)
expected = pd.Series(data_missing._from_sequence([na, valid, valid]))
self.assert_series_equal(result, expected)
|
bsd-3-clause
|
mgaitan/scipy
|
scipy/cluster/hierarchy.py
|
6
|
91762
|
"""
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
        D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
        an m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
        an m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
       Performs Ward's linkage on the condensed distance matrix ``y``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
        an m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
An :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
        cluster :math:`v`. This is also known as the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
        gives the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
implementation may chose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str or function, optional
The distance metric to use. See the ``distance.pdist`` function for a
list of valid distance metrics. The customized distance can also be
used. See the ``distance.pdist`` function for details.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
"""
if not isinstance(method, string_types):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods:
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
# Since the C code does not support striding using strides.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
if method == 'single':
_hierarchy.slink(y, Z, int(d))
else:
_hierarchy.linkage(y, Z, int(d),
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods:
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
if method == 'single':
_hierarchy.slink(dm, Z, n)
else:
_hierarchy.linkage(dm, Z, n,
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods:
if metric != 'euclidean':
raise ValueError(("Method '%s' requires the distance metric "
"to be euclidean") % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy.linkage(dm, Z, n,
int(_cpy_euclid_methods[method]))
return Z
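# --- Editor's illustrative sketch (not part of the original scipy module) ---
# A minimal example of calling linkage() on a small observation matrix and
# reading the (n - 1) x 4 linkage matrix described in the docstring above.
# The toy coordinates are invented purely for illustration.
def _example_linkage_usage():
    X = np.array([[0.0, 0.0],
                  [0.0, 1.0],
                  [5.0, 5.0],
                  [5.0, 6.0]])
    Z = linkage(X, method='ward', metric='euclidean')
    # Each row of Z is [idx_a, idx_b, distance, n_observations]; indices
    # >= len(X) refer to clusters formed in earlier rows.
    assert Z.shape == (len(X) - 1, 4)
    return Z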
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
            The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
            Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
    tree : ClusterNode or tuple
        If ``rd`` is False, a reference to the root ClusterNode object is
        returned. Otherwise, the tuple ``(r, d)`` described above is
        returned.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The number of original objects is equal to the number of rows minus
# 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
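# --- Editor's illustrative sketch (not part of the original scipy module) ---
# Shows how a linkage matrix can be converted into a ClusterNode tree and
# traversed with pre_order(); the toy observations are invented for
# illustration only.
def _example_to_tree_usage():
    X = np.array([[0.0], [1.0], [10.0], [11.0]])
    Z = linkage(X, method='single')
    root = to_tree(Z)
    # The leaves, read left to right, are the original observation ids.
    leaf_ids = root.pre_order(lambda node: node.id)
    assert sorted(leaf_ids) == [0, 1, 2, 3]
    return leaf_ids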
def _convert_to_bool(X):
if X.dtype != bool:
X = X.astype(bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
        The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
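# --- Editor's illustrative sketch (not part of the original scipy module) ---
# Computes the cophenetic correlation coefficient of a linkage against the
# condensed distance matrix it was built from; the toy data is invented
# for illustration only.
def _example_cophenet_usage():
    X = np.array([[0.0, 0.0], [0.0, 1.0], [4.0, 4.0], [4.0, 5.0]])
    Y = distance.pdist(X)
    Z = linkage(Y, method='average')
    c, coph_dists = cophenet(Z, Y)
    # c is a scalar correlation; coph_dists is condensed, like Y.
    assert coph_dists.shape == Y.shape
    return c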
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
d : int, optional
The number of links up to `d` levels below each
non-singleton cluster.
Returns
-------
R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
.. math:: \\frac{\\mathtt{Z[i,2]}-\\mathtt{R[i,0]}} {R[i,1]}
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
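# --- Editor's illustrative sketch (not part of the original scipy module) ---
# Computes inconsistency statistics for each merge of a small linkage; the
# shape of R follows the docstring above. Toy data, for illustration only.
def _example_inconsistent_usage():
    X = np.array([[0.0], [0.5], [4.0], [4.5], [9.0]])
    Z = linkage(X, method='complete')
    R = inconsistent(Z, d=2)
    # One row of [mean, std, count, inconsistency coefficient] per merge.
    assert R.shape == (Z.shape[0], 4)
    return R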
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
     * a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # Each merge distance should be no smaller than its predecessor.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(R) != np.ndarray:
raise TypeError('Variable %spassed as inconsistency matrix is not '
'a numpy array.' % name_str)
if R.dtype != np.double:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
'be two-dimensional).' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if (R[:, 0] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if (R[:, 1] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if (R[:, 2] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional
ndarray (type double) with :math:`n`
rows and 4 columns. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
        True iff the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(Z) != np.ndarray:
raise TypeError('Passed linkage argument %sis not a valid array.' %
name_str)
if Z.dtype != np.double:
raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
if len(Z.shape) != 2:
raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
'two-dimensional).' % name_str)
if Z.shape[1] != 4:
raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
raise ValueError('Linkage %scontains negative indices.' %
name_str)
if (Z[:, 2] < 0).any():
raise ValueError('Linkage %scontains negative distances.' %
name_str)
if (Z[:, 3] < 0).any():
raise ValueError('Linkage %scontains negative counts.' %
name_str)
if _check_hierarchy_uses_cluster_before_formed(Z):
raise ValueError('Linkage %suses non-singleton cluster before '
'it is formed.' % name_str)
if _check_hierarchy_uses_cluster_more_than_once(Z):
raise ValueError('Linkage %suses the same cluster more than once.'
% name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do:
MR = maxRstat(Z, R, 3)
cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do:
MI = maxinconsts(Z, R)
cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
        An array of length n-1. `monocrit[i]` is the
        statistic upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, `monocrit[i] >= monocrit[j]`.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
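# --- Editor's illustrative sketch (not part of the original scipy module) ---
# Cuts a hierarchy into flat clusters with the 'distance' criterion; with a
# threshold between the two well-separated groups below, two flat clusters
# result. Toy data, for illustration only.
def _example_fcluster_usage():
    X = np.array([[0.0], [0.5], [10.0], [10.5]])
    Z = linkage(X, method='single')
    T = fcluster(Z, t=2.0, criterion='distance')
    # T[i] is the flat-cluster id assigned to original observation i.
    assert len(set(T.tolist())) == 2
    return T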
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the euclidean
distance metric to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
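# Illustrative usage sketch for ``fclusterdata`` (hypothetical helper, not part
# of the module above): it clusters raw observations directly, so no explicit
# ``pdist``/``linkage`` calls are needed.
def _example_fclusterdata_usage():
    import numpy as np
    from scipy.cluster.hierarchy import fclusterdata
    rng = np.random.RandomState(1)
    X = np.vstack([rng.randn(15, 3), rng.randn(15, 3) + 4.0])
    # Request at most two flat clusters using complete linkage.
    return fclusterdata(X, t=2, criterion='maxclust', method='complete')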
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
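# Illustrative usage sketch for ``leaves_list`` (hypothetical helper, not part
# of the module above): the returned order matches the left-to-right leaf
# order that ``dendrogram`` would draw.
def _example_leaves_list_usage():
    import numpy as np
    from scipy.cluster.hierarchy import linkage, leaves_list
    rng = np.random.RandomState(2)
    Z = linkage(rng.randn(10, 2), method='average')
    return leaves_list(Z)  # a permutation of 0..9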
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= 85, size="6"
# 85 < p, size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot the dendrogram. Use no_plot=True to calculate the dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
# Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
ax.xaxis.set_ticks_position('bottom')
lbls = ax.get_xticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
else:
leaf_rot = float(_get_tick_rotation(len(ivl)))
map(lambda lbl: lbl.set_rotation(leaf_rot), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
else:
leaf_fs = float(_get_tick_text_size(len(ivl)))
map(lambda lbl: lbl.set_size(leaf_fs), lbls)
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
lbls = ax.get_xticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
else:
leaf_rot = float(_get_tick_rotation(p))
map(lambda lbl: lbl.set_rotation(leaf_rot), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
else:
leaf_fs = float(_get_tick_text_size(p))
map(lambda lbl: lbl.set_size(leaf_fs), lbls)
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in ax.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
ax.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
# Let's use collections instead. This way there is a separate legend
# item for each tree grouping, rather than stupidly one for each line
# segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there is a grouping of links above the color threshold,
# it should go last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for dendrogram color_threshold.
Parameters
----------
palette : list
A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
"""
if type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
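# Illustrative usage sketch for ``set_link_color_palette`` (hypothetical
# helper, not part of the module above): the palette is consulted the next
# time ``dendrogram`` colors links below ``color_threshold``.
def _example_set_link_color_palette_usage():
    from scipy.cluster.hierarchy import set_link_color_palette
    set_link_color_palette(['m', 'c', 'y', 'k'])            # custom cycle
    # ... call dendrogram(Z) here; below-threshold links use the new cycle ...
    set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])  # restore defaults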
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None, ax=None, above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
The last ``p`` non-singleton clusters formed in the linkage are the
only non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
Plots the root at the top, and plots descendent links going downwards.
(default).
``'bottom'``
Plots the root at the bottom, and plots descendent links going
upwards.
``'left'``
Plots the root at the left, and plots descendent links going right.
``'right'``
Plots the root at the right, and plots descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) in which
n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) in which
n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observations are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
When leaf_label_func is a callable function, it is called for
each leaf with cluster index :math:`k < 2n-1`, and it is
expected to return a string with the label for that
leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do::
# First define the leaf label function.
def llf(id):
if id < n:
return str(id)
else:
return '[%d %d %1.2f]' % (id, count, R[n-id,3])
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example::
dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
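# Illustrative usage sketch for ``dendrogram`` (hypothetical helper, not part
# of the module above); matplotlib is only needed when ``no_plot`` is False.
def _example_dendrogram_usage():
    import numpy as np
    from scipy.cluster.hierarchy import linkage, dendrogram
    rng = np.random.RandomState(3)
    Z = linkage(rng.randn(50, 4), method='ward')
    # Condense the plot to the last 12 merged clusters and skip the drawing,
    # keeping only the computed data structures.
    R = dendrogram(Z, truncate_mode='lastp', p=12, show_contracted=True,
                   no_plot=True)
    return R['ivl'], R['leaves']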
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
# Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
Calculates the endpoints of the links as well as the labels for
the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
# If the node is a leaf node but corresponds to a non-singleton
# cluster, its label is either the empty string or the number of original
# observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa > n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab > n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
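# Illustrative usage sketch for ``is_isomorphic`` (hypothetical helper, not
# part of the module above): the two assignments use different label values
# but induce the same partition, so the result is True.
def _example_is_isomorphic_usage():
    from scipy.cluster.hierarchy import is_isomorphic
    return is_isomorphic([1, 1, 2, 2, 3], [7, 7, 5, 5, 9])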
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
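# Illustrative usage sketch for ``maxdists`` (hypothetical helper, not part of
# the module above): one maximum distance is returned per merge in Z.
def _example_maxdists_usage():
    import numpy as np
    from scipy.cluster.hierarchy import linkage, maxdists
    rng = np.random.RandomState(4)
    Z = linkage(rng.randn(12, 2), method='single')
    return maxdists(Z)  # shape (11,)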
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
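# Illustrative usage sketch for ``maxinconsts`` (hypothetical helper, not part
# of the module above): R must come from ``inconsistent`` on the same linkage.
def _example_maxinconsts_usage():
    import numpy as np
    from scipy.cluster.hierarchy import linkage, inconsistent, maxinconsts
    rng = np.random.RandomState(5)
    Z = linkage(rng.randn(15, 2), method='average')
    R = inconsistent(Z, d=2)
    return maxinconsts(Z, R)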
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
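# Illustrative usage sketch for ``maxRstat`` (hypothetical helper, not part of
# the module above): column 3 of the inconsistency matrix is the inconsistency
# coefficient, so ``maxRstat(Z, R, 3)`` matches ``maxinconsts(Z, R)``.
def _example_maxRstat_usage():
    import numpy as np
    from scipy.cluster.hierarchy import linkage, inconsistent, maxRstat
    rng = np.random.RandomState(6)
    Z = linkage(rng.randn(15, 2), method='complete')
    R = inconsistent(Z, d=2)
    return maxRstat(Z, R, 3)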
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
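# Illustrative usage sketch for ``leaders`` (hypothetical helper, not part of
# the module above): T must be a flat assignment of the same observations,
# e.g. produced by ``fcluster`` on the same linkage.
def _example_leaders_usage():
    import numpy as np
    from scipy.cluster.hierarchy import linkage, fcluster, leaders
    rng = np.random.RandomState(7)
    Z = linkage(rng.randn(20, 2), method='single')
    T = fcluster(Z, t=3, criterion='maxclust')
    L, M = leaders(Z, T)  # L[j] is the leader node of flat cluster id M[j]
    return L, M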
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
% (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
|
bsd-3-clause
|
wgyn/github-topics
|
parse_data.py
|
1
|
1238
|
import bson
import re
import time
import sklearn
import sklearn.decomposition
import sklearn.feature_extraction.text
def normalize_string(s):
return re.sub('[^\w\s]', '', s.lower()).strip()
def tokeep(description):
if description:
return len(description) > 5
else:
return False
if __name__=='__main__':
with open('sample_data/raw/repos.bson', 'r') as f:
repos = bson.decode_all(f.read())
repos = filter(lambda r: tokeep(r['description']), repos)
descs = {r['id']: normalize_string(r['description']) for r in repos if tokeep(r['description'])}
vectorizer = sklearn.feature_extraction.text.CountVectorizer(
max_df=0.95, min_df=5, stop_words='english', max_features=1000)
counts = vectorizer.fit_transform(descs.values())
t0 = time.time()
nmf = sklearn.decomposition.NMF(n_components=50, random_state=23).fit(counts)
feature_names = vectorizer.get_feature_names()
print "%d seconds elapsed" % (time() - t0)
n_top_words = 10
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
topic_features = nmf.transform(counts)
|
mit
|
electoralstats/articles
|
dii-dw/Plotter.py
|
1
|
3410
|
import json
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['font.stretch'] = 'condensed'
mpl.rcParams['font.serif'] = ['Gentium Basic']
mpl.rcParams['font.family'] = 'serif'
def getPoints():
jsonFile = open('DII.json', 'r')
congress = json.loads(jsonFile.read())
return congress
def plot(congress):
repubsDii = []
demsDii = []
repubsDw = []
demsDw = []
for c in congress:
if congress[c]['Party'] == 'R':
repubsDii.append(congress[c]['DIIAllAvg'])
repubsDw.append(congress[c]['dw'])
else:
demsDii.append(congress[c]['DIIAllAvg'])
demsDw.append(congress[c]['dw'])
fig = plt.figure(figsize=(8,4))
ax = fig.add_axes((.1, .8, .8, .9))
plt.scatter(demsDw, demsDii, color="#0000ff", marker='.', lw=0, s=100)
plt.scatter(repubsDw, repubsDii, color="#ff0000", marker='.', lw=0, s=100)
plt.xlabel('DW-Nominate')
plt.ylabel('DII Average (All)')
plt.xlim(-1, 1)
ax.annotate("More Liberal", xy=(-0.55,-4), xytext=(-0.05, -4), horizontalalignment="right", verticalalignment='center', arrowprops=dict(facecolor="black", arrowstyle="->"))
ax.annotate("More Conservative", xy=(0.55, -4), xytext=(0.05, -4), horizontalalignment="left", verticalalignment='center', arrowprops=dict(facecolor="black", arrowstyle="->"))
ax.annotate("More Dissenting Votes", xy=(-0.95, 25), xytext=(-0.95, -4), rotation='vertical', horizontalalignment='center', verticalalignment='bottom', arrowprops=dict(facecolor="black", arrowstyle="->"))
plt.figtext(.1, .63, 'ElectoralStatistics.com', color='#283D4B', ha='left')
plt.savefig("charts/master.png", bbox_inches="tight")
plt.close(fig)
for c in congress:
fig = plt.figure(figsize=(8,4))
ax = fig.add_axes((.1, .8, .8, .9))
plt.scatter(demsDw, demsDii, color="#0000ff", marker='.', lw=0, s=100)
plt.scatter(repubsDw, repubsDii, color="#ff0000", marker='.', lw=0, s=100)
color = "#990000" if congress[c]['Party']=='R' else '#000099'
plt.scatter(congress[c]['dw'], congress[c]['DIIAllAvg'], color=color, lw=0, s=125)
textCoord = (-.5, 27) if congress[c]['Party']=='D' else (.65, 27)
ax.annotate(congress[c]['firstname'] + " " + congress[c]['lastname'] + "\nDW-Nominate: " + '{0:.3f}'.format(congress[c]['dw']) + "\nDII Average: " + '{0:.3f}'.format(congress[c]['DIIAllAvg']), xy=(congress[c]['dw'], congress[c]['DIIAllAvg']), xytext=textCoord, horizontalalignment='center')
ax.annotate("", xy=(congress[c]['dw'], congress[c]['DIIAllAvg']), xytext=textCoord, arrowprops=dict(facecolor="black", arrowstyle="->"))
plt.xlabel('DW-Nominate')
plt.ylabel('DII Average (All)')
plt.xlim(-1, 1)
ax.annotate("More Liberal", xy=(-0.55,-4), xytext=(-0.05, -4), horizontalalignment="right", verticalalignment='center', arrowprops=dict(facecolor="black", arrowstyle="->"))
ax.annotate("More Conservative", xy=(0.55, -4), xytext=(0.05, -4), horizontalalignment="left", verticalalignment='center', arrowprops=dict(facecolor="black", arrowstyle="->"))
ax.annotate("More Dissenting Votes", xy=(-0.95, 25), xytext=(-0.95, -4), rotation='vertical', horizontalalignment='center', verticalalignment='bottom', arrowprops=dict(facecolor="black", arrowstyle="->"))
plt.figtext(.1, .63, 'ElectoralStatistics.com', color='#283D4B', ha='left')
plt.savefig("charts/" + c + ".png", bbox_inches="tight")
plt.close(fig)
congress = getPoints()
plot(congress)
|
mit
|
moosekaka/sweepython
|
cell_pick_viz/vtk_makefigs.py
|
1
|
1735
|
# -*- coding: utf-8 -*-
"""
Plot mitoskel network in with various scalar values
"""
import sys
import os
import os.path as op
import matplotlib.pyplot as plt
from mayavi import mlab
from pipeline.make_networkx import makegraph as mg
from mombud.vtk_viz import vtkvizfuncs as vf
import wrappers as wr
# pylint: disable=C0103
plt.close('all')
mlab.close(all=True)
datadir = op.join(os.getcwd(), 'data')
inptdir = op.join(os.getcwd(), 'input')
# filelist and graph list
if __name__ == '__main__':
filekey = 'YPE_042715_018_RFPstack_052'
try:
vtkF = wr.swalk(op.join(inptdir, 'pipelineFigs'),
'N*Skeleton.vtk', start=5, stop=-13)
vtkS = wr.swalk(op.join(inptdir, 'surfaceFiles'),
'*surface.vtk', stop=-12)
except Exception:
print ("Check your filepaths\nSearch directory is %s\n" % inptdir)
sys.exit()
data = vf.callreader(vtkF[filekey])
node_data, edge_data, nxgrph = mg(data, filekey)
figone = mlab.figure(figure=filekey,
size=(1200, 800),
bgcolor=(.086, .086, .086))
dic = {'DY_minmax',
'WidthEq',
'DY_raw',
'rRFP',
'rGFP',
'bkstRFP',
'bkstGFP'}
for i in dic:
vtkobj, vtktube = vf.cellplot(figone,
vtkF[filekey],
scalartype=i,
rad=.08)
vtktube.actor.mapper.scalar_visibility = True # False for no heatmap
# vf.rendsurf(vtkS[filekey[:3]][filekey[4:]])
vf.labelbpoints(nxgrph, esize=.12)
mlab.savefig(op.join(datadir, 'pipelineFigs', i + '.png'))
|
mit
|
sherpaman/MolToolPy
|
bin/xcorr.py
|
1
|
1461
|
#!/usr/bin/env python
#import matplotlib.pyplot as plt
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
parser = ArgumentParser( description = 'Calculate X-CORRELATION Matrix from g_covar ascii output')
#
# INPUT FILES
#
parser.add_argument("-i","--inp",dest="inp",action="store",type=str,default=None,help="g_covar ASCII output",required=True,metavar="ASCII DAT FILE")
#
# OUTPUT FILES
#
parser.add_argument("-o","--out",dest="out",action="store",type=str,default=None,required=True,help="Output File Name",metavar="DAT FILE")
options = parser.parse_args()
raw_dat = np.loadtxt(options.inp)
N,dim = raw_dat.shape
D = raw_dat.reshape(np.sqrt([N*dim,N*dim]).astype(int))
n_res = int(np.sqrt(N*dim)/3)
v = np.zeros(n_res)
c = np.ones([n_res,n_res])
for i in np.arange(n_res):
v[i] = np.sqrt(np.trace(D[3*i:3*(i+1),3*i:3*(i+1)]))
for j in np.arange(i):
c[i,j] = np.trace(D[3*i:3*(i+1),3*j:3*(j+1)]) / (v[i]*v[j])
c[j,i] = c[i,j]
np.savez(options.out,c)
#np.savetxt(options.out+'.txt',c)
ix = np.triu_indices(len(c),1)
p = np.linspace(0,100,1002)
perc = np.percentile(c[ix],p)
SIGN = (c > perc[981]).astype(int) - (c < perc[21]).astype(int)
plt.matshow(c,cmap=plt.get_cmap('coolwarm'),vmin=-1.,vmax=+1)
plt.colorbar()
plt.savefig(options.out+".svg")
plt.matshow(SIGN,cmap=plt.get_cmap('coolwarm'))
plt.title("Significance regions")
plt.savefig(options.out+".significance.svg")
#plt.show()
quit()
|
gpl-2.0
|
theofilis/base-line-classifier
|
baseclassifier.py
|
1
|
3451
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
SYNOPSIS
.
DESCRIPTION
EXAMPLES
python baseclassifier.py filename.csv
EXIT STATUS
0 program exit normal
1 program had problem on execution
AUTHOR
Theofilis George <[email protected]>
LICENSE
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
VERSION
1
"""
import sys, os, traceback, optparse
import time
import re
from sklearn.naive_bayes import *
from generatereport import *
from csv_io import *
def main ():
global options, args
print('Start data loading...')
data, statistics = read_data(options.datafile)
X = data["data"]
Y = data["target"]
print('End data loading...')
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.ensemble import ExtraTreesClassifier
clf = GaussianNB()
clf1 = RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=1, random_state=0)
clf2 = LogisticRegression()
clf3 = LinearSVC(random_state=0)
clf4 = ExtraTreesClassifier(n_estimators=10, max_depth=None, min_samples_split=1, random_state=0)
# copy css and javascript on project folder
createproject(options.outputfolder)
expirements(options.title, options.outputfolder + "/index.html", [clf, clf1, clf2, clf3, clf4], X, Y)
if __name__ == '__main__':
try:
start_time = time.time()
parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(), usage=globals()['__doc__'], version='$Id$')
parser.add_option("-d", "--data", type="string", dest="datafile",
help="csv file")
parser.add_option("-t", "--title", type="string", dest="title",
help="title report")
parser.add_option("-c", "--classiffier", type="string", dest="clf",
help="python file with classiffier")
parser.add_option("-o", "--output", type="string", dest="outputfolder",
help="write report to output folder")
parser.add_option ('-v', '--verbose', action='store_true',
default=False, help='verbose output')
(options, args) = parser.parse_args()
# if len(args) < 1:
# parser.error ('missing argument')
if options.verbose: print time.asctime()
main()
if options.verbose: print time.asctime()
if options.verbose: print 'TOTAL TIME IN MINUTES:',
if options.verbose: print (time.time() - start_time) / 60.0
sys.exit(0)
except KeyboardInterrupt, e: # Ctrl-C
raise e
except SystemExit, e: # sys.exit()
raise e
except Exception, e:
print 'ERROR, UNEXPECTED EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
|
gpl-2.0
|
ZENGXH/scikit-learn
|
sklearn/utils/tests/test_multiclass.py
|
72
|
15350
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from functools import partial
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_label_indicator_matrix
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import is_sequence_of_sequences
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formated as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multilabel-sequences': [
[[0, 1]],
[[0], [1]],
[[1, 2, 3]],
[[1, 2, 1]], # duplicate values, why not?
[[1], [2], [0, 1]],
[[1], [2]],
[[]],
[()],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object')),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
# not currently supported sequence of sequences
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabels
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[(0, 1, 2), (0,), tuple(), (2, 1)]),
np.arange(3))
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[[0, 1, 2], [0], list(), [2, 1]]),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
# Some tests with strings input
assert_array_equal(unique_labels(["a", "b", "c"], ["d"]),
["a", "b", "c", "d"])
assert_array_equal(assert_warns(DeprecationWarning, unique_labels,
[["a", "b"], ["c"]], [["d"]]),
["a", "b", "c", "d"])
@ignore_warnings
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-sequences",
"multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
@ignore_warnings
def test_unique_labels_mixed_types():
# Mix of multilabel-indicator and multilabel-sequences
mix_multilabel_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multilabel-sequences"])
for y_multilabel, y_multiclass in mix_multilabel_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"] +
EXAMPLES["multilabel-sequences"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix string and number input type
assert_raises(ValueError, unique_labels, [[1, 2], [3]],
[["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [3]])
assert_array_equal(unique_labels([(2,), (0, 2,)], [(), ()]), [0, 2])
assert_array_equal(unique_labels([("2",), ("0", "2",)], [(), ()]),
["0", "2"])
@ignore_warnings
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group.startswith('multilabel'):
assert_, exp = assert_true, 'True'
else:
assert_, exp = assert_false, 'False'
for example in group_examples:
assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s' % (example, exp))
def test_is_label_indicator_matrix():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_label_indicator_matrix(exmpl_sparse),
msg=('is_label_indicator_matrix(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_label_indicator_matrix(example),
msg='is_label_indicator_matrix(%r) should be %s'
% (example, dense_exp))
def test_is_sequence_of_sequences():
for group, group_examples in iteritems(EXAMPLES):
if group == 'multilabel-sequences':
assert_, exp = assert_true, 'True'
check = partial(assert_warns, DeprecationWarning,
is_sequence_of_sequences)
else:
assert_, exp = assert_false, 'False'
check = is_sequence_of_sequences
for example in group_examples:
assert_(check(example),
msg='is_sequence_of_sequences(%r) should be %s'
% (example, exp))
@ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg='type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example)))
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
|
bsd-3-clause
|
mfjb/scikit-learn
|
examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py
|
252
|
3490
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
correspond to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
|
bsd-3-clause
|
JonasWallin/Mixture
|
example/NIGmix.py
|
1
|
1702
|
'''
Testing whether the model can recover the parameters of multiple univariate
regular NIG densities
Created on May 1, 2016
@author: jonaswallin
'''
from Mixture.density import NIG, mNIG
from Mixture import mixOneDims
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import numpy.random as npr
n = 100000
if __name__ == "__main__":
simObj = mNIG(paramvec = np.array([1.1, 2.12, 1., 2.,0, 2.12, .1, .1]))
simObj2 = mNIG(paramvec = np.array([ -1.1, -2.12, .4, .5,-4, 2.12, -2, -.4]))
#Y = simObj.simulate(n = n)
mixObj = mixOneDims(K = 2, d = 2)
mixObj.set_densites([simObj, simObj2])
x_true = np.array([.5, 2.1, 2.12, 1., 2.,-4, .12, .1, .1,
-1.1, -2.12, .4, .5,-1, .12, -1, -.4])
mixObj.set_param_vec(x_true)
Y = mixObj.sample(n = n)
mixObj.set_data(Y)
x_1 = np.linspace(np.min(Y[:,0]),np.max(Y[:,0]), num = 1000)
x_2 = np.linspace(np.min(Y[:,1]),np.max(Y[:,1]), num = 1000)
def f(x):
lik = - np.sum(mixObj(x))
if np.isnan(lik):
return np.Inf
return lik
x0 = npr.randn(1+4*2*2)
#x = sp.optimize.fmin_cg(f, x0 ,epsilon = 1e-4)
x = sp.optimize.fmin_powell(f, x0)
#print(optim)
mixObj.set_param_vec(x)
dens1 = mixObj.density_1d(dim = 0, y = x_1)
dens2 = mixObj.density_1d(dim = 1, y = x_2)
fig, axarr = plt.subplots(2, 1)
axarr[0].hist(Y[:,0], 200,normed=True, histtype='stepfilled', alpha=0.2)
axarr[0].plot(x_1, np.exp(dens1), color = 'red')
axarr[1].hist(Y[:,1], 200,normed=True, histtype='stepfilled', alpha=0.2)
axarr[1].plot(x_2, np.exp(dens2), color = 'red')
plt.show()
|
gpl-3.0
|
ARudiuk/mne-python
|
tutorials/plot_stats_cluster_1samp_test_time_frequency.py
|
4
|
4833
|
"""
.. _tut_stats_cluster_sensor_1samp_tfr:
===============================================================
Non-parametric 1 sample cluster statistic on single trial power
===============================================================
This script shows how to estimate significant clusters
in time-frequency power estimates. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
- extracting epochs
- computing single-trial power estimates
- baseline-correcting the power estimates (power ratios)
- computing stats to see if the ratio deviates from 1 (see the brief sketch in the comments below).
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.time_frequency import single_trial_power
from mne.stats import permutation_cluster_1samp_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_id = 1
tmin = -0.3
tmax = 0.6
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
data = epochs.get_data() # as 3D matrix
data *= 1e13 # change unit to fT / cm
# Time vector
times = 1e3 * epochs.times # change unit to ms
# Take only one channel
ch_name = raw.info['ch_names'][97]
data = data[:, 97:98, :]
evoked_data = np.mean(data, 0)
# data -= evoked_data[None,:,:] # remove evoked component
# evoked_data = np.mean(data, 0)
# Factor to down-sample the temporal dimension of the PSD computed by
# single_trial_power. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 5
frequencies = np.arange(8, 40, 2) # define frequencies of interest
sfreq = raw.info['sfreq'] # sampling in Hz
epochs_power = single_trial_power(data, sfreq=sfreq, frequencies=frequencies,
n_cycles=4, n_jobs=1,
baseline=(-100, 0), times=times,
baseline_mode='ratio', decim=decim)
# Crop in time to keep only what is between 0 and 400 ms
time_mask = (times > 0) & (times < 400)
evoked_data = evoked_data[:, time_mask]
times = times[time_mask]
# The time vector reflects the original time points, not the decimated time
# points returned by single trial power. Be sure to decimate the time mask
# appropriately.
epochs_power = epochs_power[..., time_mask[::decim]]
epochs_power = epochs_power[:, 0, :, :]
epochs_power = np.log10(epochs_power) # take log of ratio
# under the null hypothesis epochs_power should now be 0
###############################################################################
# Compute statistic
# -----------------
threshold = 2.5
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_1samp_test(epochs_power, n_permutations=100,
threshold=threshold, tail=0)
###############################################################################
# View time-frequency plots
# -------------------------
plt.clf()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
plt.plot(times, evoked_data.T)
plt.title('Evoked response (%s)' % ch_name)
plt.xlabel('time (ms)')
plt.ylabel('Magnetic Field (fT/cm)')
plt.xlim(times[0], times[-1])
plt.ylim(-100, 250)
plt.subplot(2, 1, 2)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
vmax = np.max(np.abs(T_obs))
vmin = -vmax
plt.imshow(T_obs, cmap=plt.cm.gray,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.imshow(T_obs_plot, cmap=plt.cm.RdBu_r,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.colorbar()
plt.xlabel('time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
plt.show()
|
bsd-3-clause
|
zhoulingjun/zipline
|
zipline/modelling/engine.py
|
7
|
15401
|
"""
Compute Engine for FFC API
"""
from abc import (
ABCMeta,
abstractmethod,
)
from operator import and_
from six import (
iteritems,
itervalues,
with_metaclass,
)
from six.moves import (
reduce,
zip_longest,
)
from numpy import (
add,
empty_like,
)
from pandas import (
DataFrame,
date_range,
MultiIndex,
)
from zipline.lib.adjusted_array import ensure_ndarray
from zipline.errors import NoFurtherDataError
from zipline.modelling.classifier import Classifier
from zipline.modelling.factor import Factor
from zipline.modelling.filter import Filter
from zipline.modelling.graph import TermGraph
class FFCEngine(with_metaclass(ABCMeta)):
@abstractmethod
def factor_matrix(self, terms, start_date, end_date):
"""
Compute values for `terms` between `start_date` and `end_date`.
Returns a DataFrame with a MultiIndex of (date, asset) pairs on the
index. On each date, we return a row for each asset that passed all
instances of `Filter` in `terms`, and the columns of the returned frame
will be the keys in `terms` whose values are instances of `Factor`.
Parameters
----------
terms : dict
Map from str -> zipline.modelling.term.Term.
start_date : datetime
The first date of the matrix.
end_date : datetime
The last date of the matrix.
Returns
-------
matrix : pd.DataFrame
A matrix of factors
"""
raise NotImplementedError("factor_matrix")
class NoOpFFCEngine(FFCEngine):
"""
FFCEngine that doesn't do anything.
"""
def factor_matrix(self, terms, start_date, end_date):
return DataFrame(
index=MultiIndex.from_product(
[date_range(start=start_date, end=end_date, freq='D'), ()],
),
columns=sorted(terms.keys())
)
class SimpleFFCEngine(object):
"""
FFC Engine class that computes each term independently.
Parameters
----------
loader : FFCLoader
A loader to use to retrieve raw data for atomic terms.
calendar : DatetimeIndex
Array of dates to consider as trading days when computing a range
between a fixed start and end.
asset_finder : zipline.assets.AssetFinder
An AssetFinder instance. We depend on the AssetFinder to determine
which assets are in the top-level universe at any point in time.
"""
__slots__ = [
'_loader',
'_calendar',
'_finder',
'__weakref__',
]
def __init__(self, loader, calendar, asset_finder):
self._loader = loader
self._calendar = calendar
self._finder = asset_finder
def factor_matrix(self, terms, start_date, end_date):
"""
Compute a factor matrix.
Parameters
----------
terms : dict[str -> zipline.modelling.term.Term]
Dict mapping term names to instances. The supplied names are used
as column names in our output frame.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
The algorithm implemented here can be broken down into the following
stages:
0. Build a dependency graph of all terms in `terms`. Topologically
sort the graph to determine an order in which we can compute the terms.
1. Ask our AssetFinder for a "lifetimes matrix", which should contain,
for each date between start_date and end_date, a boolean value for each
known asset indicating whether the asset existed on that date.
2. Compute each term in the dependency order determined in (0), caching
the results in a dictionary so that they can be fed into future
terms.
3. For each date, determine the number of assets passing **all**
filters. The sum, N, of all these values is the total number of rows in
our output frame, so we pre-allocate an output array of length N for
each factor in `terms`.
4. Fill in the arrays allocated in (3) by copying computed values from
our output cache into the corresponding rows.
5. Stick the values computed in (4) into a DataFrame and return it.
Step 0 is performed by `zipline.modelling.graph.TermGraph`.
Step 1 is performed in `self.build_lifetimes_matrix`.
Step 2 is performed in `self.compute_chunk`.
Steps 3, 4, and 5 are performed in self._format_factor_matrix.
See Also
--------
FFCEngine.factor_matrix
"""
if end_date <= start_date:
raise ValueError(
"start_date must be before end_date \n"
"start_date=%s, end_date=%s" % (start_date, end_date)
)
graph = TermGraph(terms)
max_extra_rows = graph.max_extra_rows
lifetimes = self.build_lifetimes_matrix(
start_date,
end_date,
max_extra_rows,
)
raw_outputs = self.compute_chunk(graph, lifetimes, {})
lifetimes_between_dates = lifetimes[max_extra_rows:]
dates = lifetimes_between_dates.index.values
assets = lifetimes_between_dates.columns.values
# We only need filters and factors to compute the final output matrix.
filters, factors = {}, {}
for name, term in iteritems(terms):
if isinstance(term, Filter):
filters[name] = raw_outputs[name]
elif isinstance(term, Factor):
factors[name] = raw_outputs[name]
elif isinstance(term, Classifier):
continue
else:
raise ValueError("Unknown term type: %s" % term)
# Treat base_mask as an implicit filter.
# TODO: Is there a clean way to make this actually just be a filter?
filters['base'] = lifetimes_between_dates.values
return self._format_factor_matrix(dates, assets, filters, factors)
def build_lifetimes_matrix(self, start_date, end_date, extra_rows):
"""
Compute a lifetimes matrix from our AssetFinder, then drop columns that
didn't exist at all during the query dates.
Parameters
----------
start_date : pd.Timestamp
Base start date for the matrix.
end_date : pd.Timestamp
End date for the matrix.
extra_rows : int
Number of rows prior to `start_date` to include.
Extra rows are needed by terms like moving averages that require a
trailing window of data to compute.
Returns
-------
lifetimes : pd.DataFrame
Frame of dtype `bool` containing dates from `extra_rows` days
before `start_date`, continuing through to `end_date`. The
returned frame contains as columns all assets in our AssetFinder
that existed for at least one day between `start_date` and
`end_date`.
"""
calendar = self._calendar
finder = self._finder
start_idx, end_idx = self._calendar.slice_locs(start_date, end_date)
if start_idx < extra_rows:
raise NoFurtherDataError(
msg="Insufficient data to compute FFC Matrix: "
"start date was %s, "
"earliest known date was %s, "
"and %d extra rows were requested." % (
start_date, calendar[0], extra_rows,
),
)
# Build lifetimes matrix reaching back to `extra_rows` days before
# `start_date.`
lifetimes = finder.lifetimes(
calendar[start_idx - extra_rows:end_idx]
)
assert lifetimes.index[extra_rows] == start_date
assert lifetimes.index[-1] == end_date
if not lifetimes.columns.is_unique:
columns = lifetimes.columns
duplicated = columns[columns.duplicated()].unique()
raise AssertionError("Duplicated sids: %s" % duplicated)
# Filter out columns that didn't exist between the requested start and
# end dates.
existed = lifetimes.iloc[extra_rows:].any()
return lifetimes.loc[:, existed]
def _inputs_for_term(self, term, workspace, extra_rows):
"""
Compute inputs for the given term.
This is mostly complicated by the fact that for each input we store
as many rows as will be necessary to serve any term requiring that
input. Thus if Factor A needs 5 extra rows of price, and Factor B
needs 3 extra rows of price, we need to remove 2 leading rows from our
stored prices before passing them to Factor B.
"""
term_extra_rows = term.extra_input_rows
if term.windowed:
return [
workspace[input_].traverse(
term.window_length,
offset=extra_rows[input_] - term_extra_rows
)
for input_ in term.inputs
]
else:
return [
ensure_ndarray(
workspace[input_][
extra_rows[input_] - term_extra_rows:
],
)
for input_ in term.inputs
]
def compute_chunk(self, graph, base_mask, initial_workspace):
"""
Compute the FFC terms in the graph for the requested start and end
dates.
Parameters
----------
graph : zipline.modelling.graph.TermGraph
Returns
-------
results : dict
Dictionary mapping requested results to outputs.
"""
loader = self._loader
extra_rows = graph.extra_rows
max_extra_rows = graph.max_extra_rows
workspace = {}
if initial_workspace is not None:
workspace.update(initial_workspace)
for term in graph.ordered():
# Subclasses are allowed to pre-populate computed values for terms,
# and in the future we may pre-compute atomic terms coming from the
# same dataset. In both cases, it's possible that we already have
# an entry for this term.
if term in workspace:
continue
base_mask_for_term = base_mask.iloc[
max_extra_rows - extra_rows[term]:
]
if term.atomic:
# FUTURE OPTIMIZATION: Scan the resolution order for terms in
# the same dataset and load them here as well.
to_load = [term]
loaded = loader.load_adjusted_array(
to_load,
base_mask_for_term,
)
for loaded_term, adj_array in zip_longest(to_load, loaded):
workspace[loaded_term] = adj_array
else:
if term.windowed:
compute = term.compute_from_windows
else:
compute = term.compute_from_arrays
workspace[term] = compute(
self._inputs_for_term(term, workspace, extra_rows),
base_mask_for_term,
)
assert(workspace[term].shape == base_mask_for_term.shape)
out = {}
for name, term in iteritems(graph.outputs):
# Truncate off extra rows from outputs.
out[name] = workspace[term][extra_rows[term]:]
return out
def _format_factor_matrix(self, dates, assets, filters, factors):
"""
Convert raw computed filters/factors into a DataFrame for public APIs.
Parameters
----------
dates : np.array[datetime64]
Row index for arrays in `filters` and `factors.`
assets : np.array[int64]
Column index for arrays in `filters` and `factors.`
filters : dict
Dict mapping filter names -> computed filters.
factors : dict
Dict mapping factor names -> computed factors.
Returns
-------
factor_matrix : pd.DataFrame
The indices of `factor_matrix` are as follows:
index : two-tiered MultiIndex of (date, asset).
For each date, we return a row for each asset that passed all
filters on that date.
columns : keys from `factors`
Each date/asset/factor triple contains the computed value of the given
factor on the given date for the given asset.
"""
# FUTURE OPTIMIZATION: Cythonize all of this.
# Boolean mask of values that passed all filters.
unioned = reduce(and_, itervalues(filters))
# Parallel arrays of (x,y) coords for (date, asset) pairs that passed
# all filters. Each entry here will correspond to a row in our output
# frame.
nonzero_xs, nonzero_ys = unioned.nonzero()
# Raw arrays storing (date, asset) pairs.
# These will form the index of our output frame.
raw_dates_index = empty_like(nonzero_xs, dtype='datetime64[ns]')
raw_assets_index = empty_like(nonzero_xs, dtype=int)
# Mapping from column_name -> array.
# This will be the `data` arg to our output frame.
columns = {
name: empty_like(nonzero_xs, dtype=factor.dtype)
for name, factor in iteritems(factors)
}
# We're going to iterate over `iteritems(columns)` a whole bunch of
# times down below. It's faster to construct and iterate over a tuple of
# pairs.
columns_iter = tuple(iteritems(columns))
# This is tricky.
# unioned.sum(axis=1) gives us an array of the same size as `dates`
# containing, for each date, the number of assets that passed our
# filters on that date.
# Running this through add.accumulate gives us an array containing, for
# each date, the running total of the number of assets that passed our
# filters on or before that date.
# This means that (bounds[i - 1], bounds[i]) gives us the indices of
# the first and last rows in our output frame for each date in `dates`.
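# For example (illustrative numbers): if 2, 3 and 1 assets pass all filters
# on three consecutive dates, unioned.sum(axis=1) is [2, 3, 1], bounds is
# [2, 5, 6], and dates 0, 1 and 2 own output rows [0, 2), [2, 5) and [5, 6).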
bounds = add.accumulate(unioned.sum(axis=1))
day_start = 0
for day_idx, day_end in enumerate(bounds):
day_bounds = slice(day_start, day_end)
column_indices = nonzero_ys[day_bounds]
raw_dates_index[day_bounds] = dates[day_idx]
raw_assets_index[day_bounds] = assets[column_indices]
for name, colarray in columns_iter:
colarray[day_bounds] = factors[name][day_idx, column_indices]
# Upper bound of current row becomes lower bound for next row.
day_start = day_end
return DataFrame(
data=columns,
index=MultiIndex.from_arrays(
[
raw_dates_index,
# FUTURE OPTIMIZATION:
# Avoid duplicate lookups by grouping and only looking up
# each unique sid once.
self._finder.retrieve_all(raw_assets_index),
],
)
).tz_localize('UTC', level=0)
|
apache-2.0
|
pprett/scikit-learn
|
sklearn/datasets/tests/test_rcv1.py
|
322
|
2414
|
"""Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
|
bsd-3-clause
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Scaled_Features/results/2_categories/test10_cross_validate_categories_mov_fixed_1200ms_scaled_method_v.py
|
1
|
4633
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted for by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
# Reduced Eigenvector Matrix according to highest Eigenvalues (considering the first 12 based on the cumulative variance computed above)
W = eigvec_total[:,0:12]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_1 = ['Fixed']*35 + ['Movable']*35 + ['Fixed']*35 + ['Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=3)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True')
show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,30.3,0,1.2])
grid('True')
#show()
|
mit
|
StuartLittlefair/astroplan
|
astroplan/plots/time_dependent.py
|
1
|
23685
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import numpy as np
import operator
import astropy.units as u
from astropy.time import Time
from collections import Sequence
import warnings
import pytz
from ..exceptions import PlotWarning
from ..utils import _set_mpl_style_sheet
__all__ = ['plot_airmass', 'plot_schedule_airmass', 'plot_parallactic',
'plot_altitude']
def _secz_to_altitude(secant_z):
"""
Convert airmass (approximated as the secant of the zenith angle) to
an altitude (aka elevation) in degrees.
Parameters
----------
secant_z : float
Secant of the zenith angle
Returns
-------
altitude : float
Altitude [degrees]
"""
return np.degrees(np.pi/2 - np.arccos(1./secant_z))
def _has_twin(ax):
"""
Solution for detecting twin axes built on `ax`. Courtesy of
Jake Vanderplas http://stackoverflow.com/a/36209590/1340208
"""
for other_ax in ax.figure.axes:
if other_ax is ax:
continue
if other_ax.bbox.bounds == ax.bbox.bounds:
return True
return False
def plot_airmass(targets, observer, time, ax=None, style_kwargs=None,
style_sheet=None, brightness_shading=False,
altitude_yaxis=False, min_airmass=1.0, min_region=None,
max_airmass=3.0, max_region=None, use_local_tz=False):
r"""
Plots airmass as a function of time for a given target.
If a `~matplotlib.axes.Axes` object already exists, an additional
airmass plot will be "stacked" on it. Otherwise, creates a new
`~matplotlib.axes.Axes` object and plots airmass on top of that.
When a scalar `~astropy.time.Time` object is passed in (e.g.,
``Time('2000-1-1')``), the resulting plot will use a 24-hour window
centered on the time indicated, with airmass sampled at regular
intervals throughout.
However, the user can control the exact number and frequency of airmass
calculations used by passing in a non-scalar `~astropy.time.Time`
object. For instance, ``Time(['2000-1-1 23:00:00', '2000-1-1
23:30:00'])`` will result in a plot with only two airmass measurements.
For examples with plots, visit the documentation of
:ref:`plots_time_dependent`.
Parameters
----------
targets : list of `~astroplan.FixedTarget` objects
The celestial bodies of interest.
If a single object is passed it will be converted to a list.
observer : `~astroplan.Observer`
The person, telescope, observatory, etc. doing the observing.
time : `~astropy.time.Time`
If scalar (e.g., ``Time('2000-1-1')``), will result in plotting target
airmasses once an hour over a 24-hour window.
If non-scalar (e.g., ``Time(['2000-1-1'])``, ``[Time('2000-1-1')]``,
``Time(['2000-1-1', '2000-1-2'])``),
will result in plotting data at the exact times specified.
ax : `~matplotlib.axes.Axes` or None, optional.
The `~matplotlib.axes.Axes` object to be drawn on.
If None, uses the current ``Axes``.
style_kwargs : dict or None, optional.
A dictionary of keywords passed into `~matplotlib.pyplot.plot_date`
to set plotting styles.
style_sheet : dict or `None` (optional)
matplotlib style sheet to use. To see available style sheets in
astroplan, print *astroplan.plots.available_style_sheets*. Defaults
to the light theme.
brightness_shading : bool
Shade background of plot to scale roughly with sky brightness. Dark
shading signifies times when the sun is below the horizon. Default
is `False`.
altitude_yaxis : bool
Add alternative y-axis on the right side of the figure with target
altitude. Default is `False`.
min_airmass : float
Lower limit of y-axis airmass range in the plot. Default is ``1.0``.
max_airmass : float
Upper limit of y-axis airmass range in the plot. Default is ``3.0``.
min_region : float
If set, defines an interval between ``min_airmass`` and ``min_region``
that will be shaded. Default is `None`.
max_region : float
If set, defines an interval between ``max_airmass`` and ``max_region``
that will be shaded. Default is `None`.
use_local_tz : bool
If the time is specified in a local timezone, the time will be plotted
in that timezone.
Returns
-------
ax : `~matplotlib.axes.Axes`
An ``Axes`` object with added airmass vs. time plot.
Notes
-----
y-axis is inverted and shows airmasses between 1.0 and 3.0 by default.
If the user wishes to change these, use ``ax.<set attribute>`` before drawing
or saving the plot.
"""
# Import matplotlib, set style sheet
if style_sheet is not None:
_set_mpl_style_sheet(style_sheet)
import matplotlib.pyplot as plt
from matplotlib import dates
# Set up plot axes and style if needed.
if ax is None:
ax = plt.gca()
if style_kwargs is None:
style_kwargs = {}
style_kwargs = dict(style_kwargs)
style_kwargs.setdefault('linestyle', '-')
style_kwargs.setdefault('linewidth', 1.5)
style_kwargs.setdefault('fmt', '-')
if hasattr(time, 'utcoffset') and use_local_tz:
tzoffset = time.utcoffset()
tzname = time.tzname()
tzinfo = time.tzinfo
else:
tzoffset = 0
tzname = 'UTC'
tzinfo = None
# Populate time window if needed.
# (plot against local time if that's requested)
time_ut = Time(time)
if time_ut.isscalar:
time_ut = time_ut + np.linspace(-12, 12, 100)*u.hour
elif len(time_ut) == 1:
warnings.warn('You used a Time array of length 1. You probably meant '
'to use a scalar. (Or maybe a list with length > 1?).',
PlotWarning)
timetoplot = time_ut + tzoffset
if not isinstance(targets, Sequence):
targets = [targets]
for target in targets:
# Calculate airmass
airmass = observer.altaz(time_ut, target).secz
# Mask out nonsense airmasses
masked_airmass = np.ma.array(airmass, mask=airmass < 1)
# Some checks & info for labels.
try:
target_name = target.name
except AttributeError:
target_name = ''
# Plot data (against timezone-offset time)
ax.plot_date(timetoplot.plot_date, masked_airmass, label=target_name, **style_kwargs)
# Format the time axis
xlo, xhi = (timetoplot[0]), (timetoplot[-1])
ax.set_xlim([xlo.plot_date, xhi.plot_date])
date_formatter = dates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(date_formatter)
plt.setp(ax.get_xticklabels(), rotation=30, ha='right')
# Shade background during night time
if brightness_shading:
start = time_ut[0]
# Calculate and order twilights and set plotting alpha for each
twilights = [
(observer.sun_set_time(start, which='next'), 0.0),
(observer.twilight_evening_civil(start, which='next'), 0.1),
(observer.twilight_evening_nautical(start, which='next'), 0.2),
(observer.twilight_evening_astronomical(start, which='next'), 0.3),
(observer.twilight_morning_astronomical(start, which='next'), 0.4),
(observer.twilight_morning_nautical(start, which='next'), 0.3),
(observer.twilight_morning_civil(start, which='next'), 0.2),
(observer.sun_rise_time(start, which='next'), 0.1),
]
# add 'UTC' to each datetime object created above
twilights = [(t[0].datetime.replace(tzinfo=pytz.utc), t[1])
for t in twilights]
twilights.sort(key=operator.itemgetter(0))
# add in left & right edges, so that if the airmass plot is requested
# during the day, night is properly shaded
left_edges = [(xlo.datetime.replace(tzinfo=tzinfo), twilights[0][1])] + twilights
right_edges = twilights + [(xhi.datetime.replace(tzinfo=tzinfo), twilights[0][1])]
for tw_left, tw_right in zip(left_edges, right_edges):
left = tw_left[0]
right = tw_right[0]
if tzinfo is not None:
# convert to local time zone (which is plotted), then hack away the tzinfo
# so that matplotlib doesn't try to double down on the conversion
left = left.astimezone(tzinfo).replace(tzinfo=None)
right = right.astimezone(tzinfo).replace(tzinfo=None)
ax.axvspan(left, right,
ymin=0, ymax=1, color='grey', alpha=tw_right[1])
# Invert y-axis and set limits.
y_lim = ax.get_ylim()
if y_lim[1] > y_lim[0]:
ax.invert_yaxis()
ax.set_ylim([max_airmass, min_airmass])
# Draw lo/hi limit regions, if present
ymax, ymin = ax.get_ylim() # should be (hi_limit, lo_limit)
if max_region is not None:
ax.axhspan(ymax, max_region, facecolor='#F9EB4E', alpha=0.10)
if min_region is not None:
ax.axhspan(min_region, ymin, facecolor='#F9EB4E', alpha=0.10)
# Set labels.
ax.set_ylabel("Airmass")
ax.set_xlabel("Time from {0} [{1}]".format(min(timetoplot).datetime.date(), tzname))
if altitude_yaxis and not _has_twin(ax):
altitude_ticks = np.array([90, 60, 50, 40, 30, 20])
airmass_ticks = 1./np.cos(np.radians(90 - altitude_ticks))
ax2 = ax.twinx()
ax2.invert_yaxis()
ax2.set_yticks(airmass_ticks)
ax2.set_yticklabels(altitude_ticks)
ax2.set_ylim(ax.get_ylim())
ax2.set_ylabel('Altitude [degrees]')
# Redraw figure for interactive sessions.
ax.figure.canvas.draw()
# Output.
return ax
def plot_altitude(targets, observer, time, ax=None, style_kwargs=None,
style_sheet=None, brightness_shading=False,
airmass_yaxis=False, min_altitude=0, min_region=None,
max_altitude=90, max_region=None):
r"""
Plots altitude as a function of time for a given target.
If a `~matplotlib.axes.Axes` object already exists, an additional
altitude plot will be "stacked" on it. Otherwise, creates a new
`~matplotlib.axes.Axes` object and plots altitude on top of that.
When a scalar `~astropy.time.Time` object is passed in (e.g.,
``Time('2000-1-1')``), the resulting plot will use a 24-hour window
centered on the time indicated, with altitude sampled at regular
intervals throughout.
However, the user can control the exact number and frequency of altitude
calculations used by passing in a non-scalar `~astropy.time.Time`
object. For instance, ``Time(['2000-1-1 23:00:00', '2000-1-1
23:30:00'])`` will result in a plot with only two altitude measurements.
For examples with plots, visit the documentation of
:ref:`plots_time_dependent`.
Parameters
----------
targets : list of `~astroplan.FixedTarget` objects
The celestial bodies of interest.
If a single object is passed it will be converted to a list.
observer : `~astroplan.Observer`
The person, telescope, observatory, etc. doing the observing.
time : `~astropy.time.Time`
If scalar (e.g., ``Time('2000-1-1')``), will result in plotting target
altitudes once an hour over a 24-hour window.
If non-scalar (e.g., ``Time(['2000-1-1'])``, ``[Time('2000-1-1')]``,
``Time(['2000-1-1', '2000-1-2'])``),
will result in plotting data at the exact times specified.
ax : `~matplotlib.axes.Axes` or None, optional.
The `~matplotlib.axes.Axes` object to be drawn on.
If None, uses the current ``Axes``.
style_kwargs : dict or None, optional.
A dictionary of keywords passed into `~matplotlib.pyplot.plot_date`
to set plotting styles.
style_sheet : dict or `None` (optional)
matplotlib style sheet to use. To see available style sheets in
astroplan, print *astroplan.plots.available_style_sheets*. Defaults
to the light theme.
brightness_shading : bool
Shade background of plot to scale roughly with sky brightness. Dark
shading signifies times when the sun is below the horizon. Default
is `False`.
airmass_yaxis : bool
Add alternative y-axis on the right side of the figure with target
airmass. Default is `False`.
min_altitude : float
Lower limit of y-axis altitude range in the plot. Default is ``0``.
max_altitude : float
Upper limit of y-axis altitude range in the plot. Default is ``90``.
min_region : float
If set, defines an interval between ``min_altitude`` and ``min_region``
that will be shaded. Default is `None`.
max_region : float
If set, defines an interval between ``max_altitude`` and ``max_region``
that will be shaded. Default is `None`.
Returns
-------
ax : `~matplotlib.axes.Axes`
An ``Axes`` object with added altitude vs. time plot.
"""
# Import matplotlib, set style sheet
if style_sheet is not None:
_set_mpl_style_sheet(style_sheet)
import matplotlib.pyplot as plt
from matplotlib import dates
# Set up plot axes and style if needed.
if ax is None:
ax = plt.gca()
if style_kwargs is None:
style_kwargs = {}
style_kwargs = dict(style_kwargs)
style_kwargs.setdefault('linestyle', '-')
style_kwargs.setdefault('linewidth', 1.5)
style_kwargs.setdefault('fmt', '-')
# Populate time window if needed.
time = Time(time)
if time.isscalar:
time = time + np.linspace(-12, 12, 100)*u.hour
elif len(time) == 1:
warnings.warn('You used a Time array of length 1. You probably meant '
'to use a scalar. (Or maybe a list with length > 1?).',
PlotWarning)
if not isinstance(targets, Sequence):
targets = [targets]
for target in targets:
# Calculate altitude
altitude = observer.altaz(time, target).alt
# Mask out nonsense altitudes (below the horizon)
masked_altitude = np.ma.array(altitude, mask=altitude < 0)
# Some checks & info for labels.
try:
target_name = target.name
except AttributeError:
target_name = ''
# Plot data
ax.plot_date(time.plot_date, masked_altitude, label=target_name, **style_kwargs)
# Format the time axis
ax.set_xlim([time[0].plot_date, time[-1].plot_date])
date_formatter = dates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(date_formatter)
plt.setp(ax.get_xticklabels(), rotation=30, ha='right')
# Shade background during night time
if brightness_shading:
start = time[0].datetime
# Calculate and order twilights and set plotting alpha for each
twilights = [
(observer.sun_set_time(Time(start), which='next').datetime, 0.0),
(observer.twilight_evening_civil(Time(start), which='next').datetime, 0.1),
(observer.twilight_evening_nautical(Time(start), which='next').datetime, 0.2),
(observer.twilight_evening_astronomical(Time(start), which='next').datetime, 0.3),
(observer.twilight_morning_astronomical(Time(start), which='next').datetime, 0.4),
(observer.twilight_morning_nautical(Time(start), which='next').datetime, 0.3),
(observer.twilight_morning_civil(Time(start), which='next').datetime, 0.2),
(observer.sun_rise_time(Time(start), which='next').datetime, 0.1),
]
twilights.sort(key=operator.itemgetter(0))
for i, twi in enumerate(twilights[1:], 1):
ax.axvspan(twilights[i - 1][0], twilights[i][0],
ymin=0, ymax=1, color='grey', alpha=twi[1])
# Invert y-axis and set limits.
# y_lim = ax.get_ylim()
# if y_lim[1] > y_lim[0]:
# ax.invert_yaxis()
ax.set_ylim([min_altitude, max_altitude])
# Draw lo/hi limit regions, if present
ymax, ymin = ax.get_ylim() # should be (hi_limit, lo_limit)
if max_region is not None:
ax.axhspan(ymax, max_region, facecolor='#F9EB4E', alpha=0.10)
if min_region is not None:
ax.axhspan(min_region, ymin, facecolor='#F9EB4E', alpha=0.10)
# Set labels.
ax.set_ylabel("Altitude")
ax.set_xlabel("Time from {0} [UTC]".format(min(time).datetime.date()))
if airmass_yaxis and not _has_twin(ax):
# altitude_ticks = np.array([90, 60, 50, 40, 30, 20])
# airmass_ticks = 1./np.cos(np.radians(90 - altitude_ticks))
airmass_ticks = np.array([1, 2, 3])
altitude_ticks = 90 - np.degrees(np.arccos(1/airmass_ticks))
ax2 = ax.twinx()
# ax2.invert_yaxis()
ax2.set_yticks(altitude_ticks)
ax2.set_yticklabels(airmass_ticks)
ax2.set_ylim(ax.get_ylim())
ax2.set_ylabel('Airmass')
# Redraw figure for interactive sessions.
ax.figure.canvas.draw()
# Output.
return ax
def plot_schedule_airmass(schedule, show_night=False):
"""
Plots when observations of targets are scheduled to occur superimposed
upon plots of the airmasses of the targets.
Parameters
----------
schedule : `~astroplan.Schedule`
a schedule object output by a scheduler
show_night : bool
Shades the night-time on the plot
Returns
-------
ax : `~matplotlib.axes.Axes`
An ``Axes`` object with added airmass and schedule vs. time plot.
"""
import matplotlib.pyplot as plt
blocks = copy.copy(schedule.scheduled_blocks)
sorted_blocks = sorted(schedule.observing_blocks, key=lambda x: x.priority)
targets = [block.target for block in sorted_blocks]
ts = (schedule.start_time +
np.linspace(0, (schedule.end_time - schedule.start_time).value, 100) * u.day)
targ_to_color = {}
color_idx = np.linspace(0, 1, len(targets))
# lighter, bluer colors indicate higher priority
for target, ci in zip(set(targets), color_idx):
plot_airmass(target, schedule.observer, ts, style_kwargs=dict(color=plt.cm.cool(ci)))
targ_to_color[target.name] = plt.cm.cool(ci)
if show_night:
# I'm pretty sure this overlaps a lot, creating darker bands
for test_time in ts:
midnight = schedule.observer.midnight(test_time)
previous_sunset = schedule.observer.sun_set_time(
midnight, which='previous')
next_sunrise = schedule.observer.sun_rise_time(
midnight, which='next')
previous_twilight = schedule.observer.twilight_evening_astronomical(
midnight, which='previous')
next_twilight = schedule.observer.twilight_morning_astronomical(
midnight, which='next')
plt.axvspan(previous_sunset.plot_date, next_sunrise.plot_date,
facecolor='lightgrey', alpha=0.05)
plt.axvspan(previous_twilight.plot_date, next_twilight.plot_date,
facecolor='lightgrey', alpha=0.05)
for block in blocks:
if hasattr(block, 'target'):
plt.axvspan(block.start_time.plot_date, block.end_time.plot_date,
fc=targ_to_color[block.target.name], lw=0, alpha=.6)
else:
plt.axvspan(block.start_time.plot_date, block.end_time.plot_date,
color='k')
plt.axhline(3, color='k', label='Transitions')
# TODO: make this output an ``Axes`` object
def plot_parallactic(target, observer, time, ax=None, style_kwargs=None,
style_sheet=None):
"""
Plots parallactic angle as a function of time for a given target.
If a `~matplotlib.axes.Axes` object already exists, an additional
parallactic angle plot will be "stacked" on it. Otherwise, creates a
new `~matplotlib.axes.Axes` object and plots on top of that.
When a scalar `~astropy.time.Time` object is passed in (e.g.,
``Time('2000-1-1')``), the resulting plot will use a 24-hour window
centered on the time indicated, with parallactic angle sampled at
regular intervals throughout.
However, the user can control the exact number and frequency of parallactic
angle calculations used by passing in a non-scalar `~astropy.time.Time`
object. For instance, ``Time(['2000-1-1 23:00:00', '2000-1-1 23:30:00'])``
will result in a plot with only two parallactic angle measurements.
For examples with plots, visit the documentation of
:ref:`plots_time_dependent`.
Parameters
----------
target : `~astroplan.FixedTarget`
The celestial body of interest.
observer : `~astroplan.Observer`
The person, telescope, observatory, etc. doing the observing.
time : `~astropy.time.Time`
If scalar (e.g., ``Time('2000-1-1')``), will result in plotting target
parallactic angle once an hour over a 24-hour window.
If non-scalar (e.g., ``Time(['2000-1-1'])``, ``[Time('2000-1-1')]``,
``Time(['2000-1-1', '2000-1-2'])``),
will result in plotting data at the exact times specified.
ax : `~matplotlib.axes.Axes` or None, optional.
The ``Axes`` object to be drawn on.
If None, uses the current ``Axes``.
style_kwargs : dict or None, optional.
A dictionary of keywords passed into `~matplotlib.pyplot.plot_date`
to set plotting styles.
style_sheet : dict or `None` (optional)
matplotlib style sheet to use. To see available style sheets in
astroplan, print *astroplan.plots.available_style_sheets*. Defaults
to the light theme.
Returns
-------
ax : `~matplotlib.axes.Axes`
An ``Axes`` object with added parallactic angle vs. time plot.
"""
# Import matplotlib, set style sheet
if style_sheet is not None:
_set_mpl_style_sheet(style_sheet)
import matplotlib.pyplot as plt
from matplotlib import dates
# Set up plot axes and style if needed.
if ax is None:
ax = plt.gca()
if style_kwargs is None:
style_kwargs = {}
style_kwargs = dict(style_kwargs)
style_kwargs.setdefault('linestyle', '-')
style_kwargs.setdefault('fmt', '-')
# Populate time window if needed.
time = Time(time)
if time.isscalar:
time = time + np.linspace(-12, 12, 100)*u.hour
elif len(time) == 1:
warnings.warn('You used a Time array of length 1. You probably meant '
'to use a scalar. (Or maybe a list with length > 1?).',
PlotWarning)
# Calculate parallactic angle.
p_angle = observer.parallactic_angle(time, target)
# Some checks & info for labels.
assert len(time) == len(p_angle)
if not hasattr(target, 'name'):
target_name = ''
else:
target_name = target.name
style_kwargs.setdefault('label', target_name)
# Plot data.
ax.plot_date(time.plot_date, p_angle, **style_kwargs)
# Format the time axis
date_formatter = dates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(date_formatter)
plt.setp(ax.get_xticklabels(), rotation=30, ha='right')
# Set labels.
ax.set_ylabel("Parallactic Angle - Radians")
ax.set_xlabel("Time from {0} [UTC]".format(min(time).datetime.date()))
# Redraw figure for interactive sessions.
ax.figure.canvas.draw()
return ax
|
bsd-3-clause
|
billy-inn/scikit-learn
|
examples/ensemble/plot_random_forest_embedding.py
|
286
|
3531
|
"""
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Neighboring points often end up in the same leaf of a tree and therefore
share large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of
the transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forest learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result after dimensionality reduction using truncated SVD
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
astroML/astroML
|
astroML/linear_model/kernel_regression.py
|
2
|
1568
|
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.metrics import pairwise_kernels
class NadarayaWatson(BaseEstimator):
"""Nadaraya-Watson Kernel Regression
This is basically a Gaussian-weighted moving average of points
Parameters
----------
kernel : string
kernel is either "gaussian", or one of the kernels available in
sklearn.metrics.pairwise.
h : float or array_like
width of kernel. If array, its length must be the number of
dimensions in the training data
Additional keyword arguments are passed to the kernel.
"""
def __init__(self, kernel='gaussian', h=None, **kwargs):
self.kernel = kernel
self.h = h
self.kwargs = kwargs
def fit(self, X, y, dy=1):
self.X = np.asarray(X)
self.y = np.asarray(y)
self.dy = np.atleast_1d(dy)
return self
def predict(self, X):
X = np.asarray(X)
if X.ndim != 2:
raise ValueError('X must be two-dimensional')
if X.shape[1] != self.X.shape[1]:
raise ValueError('dimensions of X do not match training dimension')
if self.kernel == 'gaussian':
# wrangle gaussian into scikit-learn's 'rbf' kernel
h = np.asarray(self.h)
gamma = 0.5 / h / h
K = pairwise_kernels(X, self.X, metric='rbf', gamma=gamma)
else:
K = pairwise_kernels(X, self.X, metric=self.kernel, **self.kwargs)
K /= self.dy ** 2
return (K * self.y).sum(1) / K.sum(1)
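if __name__ == '__main__':
    # Minimal usage sketch on synthetic data (illustrative only; the noisy
    # sine curve below is made up for demonstration): smooth it with a
    # Gaussian kernel of width 0.5.
    rng = np.random.RandomState(0)
    X_train = np.linspace(0, 10, 200)[:, None]
    y_train = np.sin(X_train).ravel() + 0.1 * rng.randn(200)
    X_test = np.linspace(0, 10, 50)[:, None]
    model = NadarayaWatson(kernel='gaussian', h=0.5).fit(X_train, y_train)
    y_smooth = model.predict(X_test)
    print(y_smooth[:5])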
|
bsd-2-clause
|
mit0110/activepipe
|
test_featmultinomial.py
|
1
|
5694
|
"""
Tests for the modified MultinomialNB that check whether it is correctly trained
using labeled features.
"""
import unittest
import numpy as np
from featmultinomial import FeatMultinomalNB
from copy import deepcopy
from math import log
from sklearn import tree
# I should use random numbers here!
X = np.array([
[2, 0, 10, 3],
[5, 0, 1, 0],
[0, 8, 3, 7]
])
Y = np.array([0, 0, 1])
features = np.array([
[1, 1, 1.5, 1],
[1, 1.5, 1, 1]
])
"""
I = [[1,0,1,1],
[1,0,1,0],
[0,1,1,1]]
P(I0=1, c0) = #instances with feat 0 and class 0 / # instances = 2/3
P(I1=1, c0) = 0/3 = 0
P(I2=1, c0) = 2/3
P(I3=1, c0) = 1/3
P(I0=1, c1) = #instances with feat 0 and class 1 / # instances = 0/3 = 0
P(I1=1, c1) = 1/3
P(I2=1, c1) = 1/3
P(I3=1, c1) = 1/3
P(I0=0, c0) = #instances without feat 0 and class 0 / # instances = 0/3 = 0
P(I1=0, c0) = 2/3
P(I2=0, c0) = 0/3
P(I3=0, c0) = 1/3
P(I0=0, c1) = #instances with feat 0 and class 1 / # instances = 1/3
P(I1=0, c1) = 0/3
P(I2=0, c1) = 0/3
P(I3=0, c1) = 0/3
P(I0=1) = 2/3 P(I0=0) = 1/3
P(I1=1) = 1/3 P(I1=0) = 2/3
P(I2=1) = 1 P(I2=0) = 0
P(I3=1) = 2/3 P(I3=0) = 1/3
P(c0) = 2/3
P(c1) = 1/3
IG(f0) = (P(I0=1, c0) * log(P(I0=1, c0) / (P(I0=1) * P(c0)) ) ) +
(P(I0=1, c1) * log(P(I0=1, c1) / (P(I0=1) * P(c1)) ) ) +
(P(I0=0, c0) * log(P(I0=0, c0) / (P(I0=0) * P(c0)) ) ) +
(P(I0=0, c1) * log(P(I0=0, c1) / (P(I0=0) * P(c1)) ) )
= (2.0/3.0 * log(2.0/3.0 / (2.0/3.0 * 2.0/3.0) ) ) +
(0 * log(0 / (2.0/3.0 * 1/3.0) ) ) +
(0 * log(0 / (1/3.0 * 2.0/3.0) ) ) +
(1/3.0 * log(1/3.0 / (1/3.0 * 1/3.0) ) )
= 0.27031 + 0 + 0 + 0.3662 = 0.63651
IG(f1) = (0 * log(0 / (1/3.0 * 2/3.0) ) ) +
(1.0/3.0 * log(1.0/3.0 / (1.0/3.0 * 1.0/3.0) ) ) +
(2/3.0 * log(2/3.0 / (2.0/3.0 * 2/3.0) ) ) +
(0 * log(1.0/3.0 / (2.0/3.0 * 1.0/3.0) ) )
= 0 + 0.3662 + 0.27031 + 0.13515 = 0.7716
IG(f2) = (2/3.0 * log(2/3.0 / (1 * 2/3.0) ) ) +
(1/3.0 * log(1/3.0 / (1 * 1/3.0) ) ) +
(0 * log(0 / (0 * 2/3.0) ) ) +
(0 * log(0 / (0 * 1/3.0) ) )
= 0 + 0 + 0 + 0 = 0
IG(f3) = (1/3.0 * log(1/3.0 / (2/3.0 * 2/3.0) ) ) +
(1/3.0 * log(1/3.0 / (2/3.0 * 1/3.0) ) ) +
(1/3.0 * log(1/3.0 / (1/3.0 * 2/3.0) ) ) +
(0 * log(0 / (1/3.0 * 1/3.0) ) )
= -0.09589402415059363 + 0.135115 + 0.135115 + 0 = 0.17433
"""
ig_correct_anwers = [0.636514, 0.636514, 0.0, 0.17441]
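# Quick cross-check of the first value above, kept as a comment so it does
# not run at import time: binarizing feature 0 and computing its mutual
# information with Y (in nats) gives the same number, e.g.
#     from sklearn.metrics import mutual_info_score
#     mutual_info_score(Y, (X[:, 0] > 0).astype(int))   # ~0.6365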
class TestFeatMultinomialNB(unittest.TestCase):
def setUp(self):
self.fmnb = FeatMultinomalNB()
self.fmnb.fit(X, Y)
def test_fit(self):
no_feat_prior = deepcopy(self.fmnb.feature_log_prob_)
self.fmnb.fit(X, Y, features=features)
feat_prior = self.fmnb.feature_log_prob_
self.assertNotEqual(no_feat_prior[0][2], feat_prior[0][2])
self.assertTrue(np.all(self.fmnb.alpha == features))
def test_information_gain(self):
ig = self.fmnb.feat_information_gain
self.assertEqual(ig.shape[0], X.shape[1])
for i, answer in enumerate(ig):
self.assertAlmostEqual(answer, ig_correct_anwers[i], places=4)
self.assertTrue(np.all(ig.argsort() == [2, 3, 0, 1]))
def test_instance_proba(self):
"""
P(f0) = 0.4*0.25 + 0.6*0.75 = 0.55
P(f1) = 0.2*0.25 + 0.8*0.75 = 0.65
P(f2) = 0.5*0.25 + 0.5*0.75 = 0.5
P(I0) = 0.55**0 * 0.65**1 * 0.5**3 = 0.08125
P(I1) = 0.55**2 * 0.65**0 * 0.5**5 = 0.0094
P(I2) = 0.55**3 * 0.65**0 * 0.5**1 = 0.083
P(I3) = 0.55**0 * 0.65**4 * 0.5**0 = 0.1785
"""
self.fmnb.feature_log_prob_ = np.log(np.array([[0.4, 0.2, 0.5],
[0.6, 0.8, 0.5]]))
self.fmnb.class_log_prior_ = np.log(np.array([0.25, 0.75]))
instances = np.array([[0, 1, 3],
[2, 0, 5],
[3, 0, 1],
[0, 4, 0]])
result = self.fmnb.instance_proba(instances)
expected = np.array([0.08125, 0.0094, 0.083, 0.1785])
self.assertEqual(result.shape, (4,))
np.testing.assert_array_almost_equal(result, expected, decimal=3)
class TestIGwithDecisionTree(unittest.TestCase):
def setUp(self):
self.fmnb = FeatMultinomalNB()
self.dtree = tree.DecisionTreeClassifier(criterion='entropy',
min_samples_split=1,
min_samples_leaf=1)
def tearDown(self):
self.assertTrue(np.all(self.fmnb.feat_information_gain.argsort() ==
self.dtree.feature_importances_.argsort()))
def test_ig_with_iris(self):
from sklearn.datasets import load_iris
iris = load_iris()
self.fmnb.fit((iris.data > 3), iris.target)
self.dtree.fit((iris.data > 3), iris.target)
def test_ig_with_bag_of_words(self):
from sklearn.feature_extraction.text import CountVectorizer
corpus = ['This is a corpus and the main',
'objective is to have senteces to simulate a sparse',
'matrix of features, on the opposite of the iris corpus',
'that has few features and all the features are present in all',
'the instances.',
'By the way, this all are documents.']
target = [1, 2, 3, 2, 1, 2]
vectorizer = CountVectorizer(min_df=1)
X = vectorizer.fit_transform(corpus)
self.fmnb.fit(X, target)
self.dtree.fit(X.todense(), target)
if __name__ == '__main__':
unittest.main()
|
mit
|
RomainBrault/scikit-learn
|
sklearn/linear_model/base.py
|
9
|
20680
|
"""
Generalized Linear models.
"""
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Vincent Michel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# Maryan Morel <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from __future__ import division
from abc import ABCMeta, abstractmethod
import numbers
import warnings
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from scipy import sparse
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..utils import check_array, check_X_y, deprecated, as_float_array
from ..utils.validation import FLOAT_DTYPES
from ..utils import check_random_state
from ..utils.extmath import safe_sparse_dot
from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale
from ..utils.fixes import sparse_lsqr
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from ..preprocessing.data import normalize as f_normalize
# TODO: bayesian_ridge_regression and bayesian_regression_ard
# should be squashed into its respective objects.
SPARSE_INTERCEPT_DECAY = 0.01
# For sparse data intercept updates are scaled by this decay factor to avoid
# intercept oscillation.
def make_dataset(X, y, sample_weight, random_state=None):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
rng = check_random_state(random_state)
# seed should never be 0 in SequentialDataset
seed = rng.randint(1, np.iinfo(np.int32).max)
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y, sample_weight,
seed=seed)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y, sample_weight, seed=seed)
intercept_decay = 1.0
return dataset, intercept_decay
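# Illustrative sketch (not part of the original module): a tiny invented call to
# ``make_dataset`` showing that dense input keeps intercept_decay == 1.0 while
# sparse input gets SPARSE_INTERCEPT_DECAY. The arrays are assumptions for
# demonstration only.
def _make_dataset_example():
    X = np.array([[1., 0.], [0., 2.], [3., 1.]])
    y = np.array([1., 2., 3.])
    sample_weight = np.ones(3)
    _, decay_dense = make_dataset(X, y, sample_weight, random_state=0)
    _, decay_sparse = make_dataset(sp.csr_matrix(X), y, sample_weight, random_state=0)
    return decay_dense, decay_sparse  # -> (1.0, SPARSE_INTERCEPT_DECAY)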
@deprecated("sparse_center_data was deprecated in version 0.18 and will be "
"removed in 0.20. Use utilities in preprocessing.data instead")
def sparse_center_data(X, y, fit_intercept, normalize=False):
"""
Compute information needed to center data to have mean zero along
axis 0. Be aware that X will not be centered since it would break
the sparsity, but will be normalized if asked so.
"""
if fit_intercept:
# we might require not to change the csr matrix sometimes
# store a copy if normalize is True.
# Change dtype to float64 since mean_variance_axis accepts
# it that way.
if sp.isspmatrix(X) and X.getformat() == 'csr':
X = sp.csr_matrix(X, copy=normalize, dtype=np.float64)
else:
X = sp.csc_matrix(X, copy=normalize, dtype=np.float64)
X_offset, X_var = mean_variance_axis(X, axis=0)
if normalize:
# transform variance to std in-place
X_var *= X.shape[0]
X_std = np.sqrt(X_var, X_var)
del X_var
X_std[X_std == 0] = 1
inplace_column_scale(X, 1. / X_std)
else:
X_std = np.ones(X.shape[1])
y_offset = y.mean(axis=0)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_std
@deprecated("center_data was deprecated in version 0.18 and will be removed in "
"0.20. Use utilities in preprocessing.data instead")
def center_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None):
"""
Centers data to have mean zero along axis 0. This is here because
nearly all linear models will want their data to be centered.
If sample_weight is not None, then the weighted mean of X and y
is zero, and not the mean itself
"""
X = as_float_array(X, copy)
if fit_intercept:
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sp.issparse(X):
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X -= X_offset
# XXX: currently scaled to variance=n_samples
if normalize:
X_std = np.sqrt(np.sum(X ** 2, axis=0))
X_std[X_std == 0] = 1
X /= X_std
else:
X_std = np.ones(X.shape[1])
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_std
def _preprocess_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None, return_mean=False):
"""
Centers data to have mean zero along axis 0. If fit_intercept=False or if
the X is a sparse matrix, no centering is done, but normalization can still
be applied. The function returns the statistics necessary to reconstruct
the input data, which are X_offset, y_offset, X_scale, such that the output
X = (X - X_offset) / X_scale
X_scale is the L2 norm of X - X_offset. If sample_weight is not None,
then the weighted mean of X and y is zero, and not the mean itself. If
return_mean=True, the mean, eventually weighted, is returned, independently
of whether X was centered (option used for optimization with sparse data in
coordinate_descend).
This is here because nearly all linear models will want their data to be
centered.
"""
if isinstance(sample_weight, numbers.Number):
sample_weight = None
X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'],
dtype=FLOAT_DTYPES)
if fit_intercept:
if sp.issparse(X):
X_offset, X_var = mean_variance_axis(X, axis=0)
if not return_mean:
X_offset = np.zeros(X.shape[1])
if normalize:
# TODO: f_normalize could be used here as well but the function
# inplace_csr_row_normalize_l2 must be changed such that it
# can return also the norms computed internally
# transform variance to norm in-place
X_var *= X.shape[0]
X_scale = np.sqrt(X_var, X_var)
del X_var
X_scale[X_scale == 0] = 1
inplace_column_scale(X, 1. / X_scale)
else:
X_scale = np.ones(X.shape[1])
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X -= X_offset
if normalize:
X, X_scale = f_normalize(X, axis=0, copy=False,
return_norm=True)
else:
X_scale = np.ones(X.shape[1])
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1])
X_scale = np.ones(X.shape[1])
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_scale
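# Illustrative sketch (not part of the original module): verifies the relation
# documented above, X_out == (X - X_offset) / X_scale, on a small invented
# dense array.
def _preprocess_data_example():
    X = np.array([[1., 2.], [3., 4.], [5., 12.]])
    y = np.array([1., 2., 3.])
    Xp, yp, X_offset, y_offset, X_scale = _preprocess_data(
        X, y, fit_intercept=True, normalize=True, copy=True)
    assert np.allclose(Xp, (X - X_offset) / X_scale)
    assert np.allclose(yp, y - y_offset)
    return X_offset, y_offset, X_scale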
# TODO: _rescale_data should be factored into _preprocess_data.
# Currently, the fact that sag implements its own way to deal with
# sample_weight makes the refactoring tricky.
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
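# Illustrative sketch (assumed toy data): _rescale_data multiplies the rows of
# X and y by sqrt(sample_weight), so an unweighted least-squares fit on the
# rescaled data is equivalent to a weighted fit on the original data.
def _rescale_data_example():
    rng = np.random.RandomState(0)
    X, y = rng.rand(5, 2), rng.rand(5)
    sample_weight = np.array([1., 2., 1., 3., 1.])
    X_r, y_r = _rescale_data(X, y, sample_weight)
    assert np.allclose(X_r, X * np.sqrt(sample_weight)[:, np.newaxis])
    assert np.allclose(y_r, y * np.sqrt(sample_weight))
    return X_r, y_r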
class LinearModel(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for Linear Models"""
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _decision_function(self, X):
check_is_fitted(self, "coef_")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
C : array, shape = (n_samples,)
Returns predicted values.
"""
return self._decision_function(X)
_preprocess_data = staticmethod(_preprocess_data)
def _set_intercept(self, X_offset, y_offset, X_scale):
"""Set the intercept_
"""
if self.fit_intercept:
self.coef_ = self.coef_ / X_scale
self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)
else:
self.intercept_ = 0.
# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
# Maybe the n_features checking can be moved to LinearModel.
class LinearClassifierMixin(ClassifierMixin):
"""Mixin for linear classifiers.
Handles prediction for sparse and dense X.
"""
def decision_function(self, X):
"""Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence scores per (sample, class) combination. In the binary
case, confidence score for self.classes_[1] where >0 means this
class would be predicted.
"""
if not hasattr(self, 'coef_') or self.coef_ is None:
raise NotFittedError("This %(name)s instance is not fitted "
"yet" % {'name': type(self).__name__})
X = check_array(X, accept_sparse='csr')
n_features = self.coef_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d"
% (X.shape[1], n_features))
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel() if scores.shape[1] == 1 else scores
def predict(self, X):
"""Predict class labels for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples]
Predicted class label per sample.
"""
scores = self.decision_function(X)
if len(scores.shape) == 1:
indices = (scores > 0).astype(np.int)
else:
indices = scores.argmax(axis=1)
return self.classes_[indices]
def _predict_proba_lr(self, X):
"""Probability estimation for OvR logistic regression.
Positive class probabilities are computed as
1. / (1. + np.exp(-self.decision_function(X)));
multiclass is handled by normalizing that over all classes.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if prob.ndim == 1:
return np.vstack([1 - prob, prob]).T
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
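# Illustrative sketch (not part of the original module): the same sigmoid +
# OvR normalisation applied to a hand-made score array, without a fitted
# estimator. ``scores`` stands for whatever decision_function would return.
def _ovr_proba_from_scores(scores):
    prob = 1. / (1. + np.exp(-scores))
    if prob.ndim == 1:
        # binary case: stack P(class 0) and P(class 1)
        return np.vstack([1 - prob, prob]).T
    return prob / prob.sum(axis=1).reshape((prob.shape[0], -1))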
class SparseCoefMixin(object):
"""Mixin for converting coef_ to and from CSR format.
L1-regularizing estimators should inherit this.
"""
def densify(self):
"""Convert coefficient matrix to dense array format.
Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
default format of ``coef_`` and is required for fitting, so calling
this method is only required on models that have previously been
sparsified; otherwise, it is a no-op.
Returns
-------
self : estimator
"""
msg = "Estimator, %(name)s, must be fitted before densifying."
check_is_fitted(self, "coef_", msg=msg)
if sp.issparse(self.coef_):
self.coef_ = self.coef_.toarray()
return self
def sparsify(self):
"""Convert coefficient matrix to sparse format.
Converts the ``coef_`` member to a scipy.sparse matrix, which for
L1-regularized models can be much more memory- and storage-efficient
than the usual numpy.ndarray representation.
The ``intercept_`` member is not converted.
Notes
-----
For non-sparse models, i.e. when there are not many zeros in ``coef_``,
this may actually *increase* memory usage, so use this method with
care. A rule of thumb is that the number of zero elements, which can
be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
to provide significant benefits.
After calling this method, further fitting with the partial_fit
method (if any) will not work until you call densify.
Returns
-------
self : estimator
"""
msg = "Estimator, %(name)s, must be fitted before sparsifying."
check_is_fitted(self, "coef_", msg=msg)
self.coef_ = sp.csr_matrix(self.coef_)
return self
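# Illustrative sketch (assumption): applying the 50% rule of thumb from the
# docstring above before deciding to call ``sparsify`` on a fitted estimator.
def _maybe_sparsify(estimator):
    coef = estimator.coef_
    if not sp.issparse(coef) and (coef == 0).mean() > 0.5:
        estimator.sparsify()
    return estimator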
class LinearRegression(LinearModel, RegressorMixin):
"""
Ordinary least squares Linear Regression.
Parameters
----------
fit_intercept : boolean, optional, default True
whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on
an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
n_jobs : int, optional, default 1
The number of jobs to use for the computation.
If -1 all CPUs are used. This will only provide speedup for
n_targets > 1 and sufficient large problems.
Attributes
----------
coef_ : array, shape (n_features, ) or (n_targets, n_features)
Estimated coefficients for the linear regression problem.
If multiple targets are passed during the fit (y 2D), this
is a 2D array of shape (n_targets, n_features), while if only
one target is passed, this is a 1D array of length n_features.
intercept_ : array
Independent term in the linear model.
Notes
-----
From the implementation point of view, this is just plain Ordinary
Least Squares (scipy.linalg.lstsq) wrapped as a predictor object.
"""
def __init__(self, fit_intercept=True, normalize=False, copy_X=True,
n_jobs=1):
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""
Fit linear model.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data
y : numpy array of shape [n_samples, n_targets]
Target values
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample
.. versionadded:: 0.17
parameter *sample_weight* support to LinearRegression.
Returns
-------
self : returns an instance of self.
"""
n_jobs_ = self.n_jobs
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
y_numeric=True, multi_output=True)
if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,
copy=self.copy_X, sample_weight=sample_weight)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
if sp.issparse(X):
if y.ndim < 2:
out = sparse_lsqr(X, y)
self.coef_ = out[0]
self._residues = out[3]
else:
# sparse_lsqr cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(sparse_lsqr)(X, y[:, j].ravel())
for j in range(y.shape[1]))
self.coef_ = np.vstack(out[0] for out in outs)
self._residues = np.vstack(out[3] for out in outs)
else:
self.coef_, self._residues, self.rank_, self.singular_ = \
linalg.lstsq(X, y)
self.coef_ = self.coef_.T
if y.ndim == 1:
self.coef_ = np.ravel(self.coef_)
self._set_intercept(X_offset, y_offset, X_scale)
return self
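# Illustrative sketch (invented toy data): fitting the estimator defined above;
# the model recovers y = 1 + 2 * x, so coef_ ~= [2.] and intercept_ ~= 1.
def _linear_regression_example():
    X = np.array([[0.], [1.], [2.], [3.]])
    y = 1. + 2. * X.ravel()
    reg = LinearRegression().fit(X, y)
    return reg.coef_, reg.intercept_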
def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy):
"""Aux function used at beginning of fit in linear models"""
n_samples, n_features = X.shape
if sparse.isspmatrix(X):
precompute = False
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=fit_intercept, normalize=normalize,
return_mean=True)
else:
# copy was done in fit if necessary
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy)
if hasattr(precompute, '__array__') and (
fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or
normalize and not np.allclose(X_scale, np.ones(n_features))):
warnings.warn("Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
UserWarning)
# recompute Gram
precompute = 'auto'
Xy = None
# precompute if n_samples > n_features
if isinstance(precompute, six.string_types) and precompute == 'auto':
precompute = (n_samples > n_features)
if precompute is True:
# make sure that the 'precompute' array is contiguous.
precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype,
order='C')
np.dot(X.T, X, out=precompute)
if not hasattr(precompute, '__array__'):
Xy = None # cannot use Xy if precompute is not Gram
if hasattr(precompute, '__array__') and Xy is None:
common_dtype = np.find_common_type([X.dtype, y.dtype], [])
if y.ndim == 1:
# Xy is 1d, make sure it is contiguous.
Xy = np.empty(shape=n_features, dtype=common_dtype, order='C')
np.dot(X.T, y, out=Xy)
else:
# Make sure that Xy is always F contiguous even if X or y are not
# contiguous: the goal is to make it fast to extract the data for a
# specific target.
n_targets = y.shape[1]
Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype,
order='F')
np.dot(y.T, X, out=Xy.T)
return X, y, X_offset, y_offset, X_scale, precompute, Xy
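# Illustrative sketch (invented toy data): with precompute='auto' the Gram
# matrix is built only when n_samples > n_features, and Xy is then computed
# from the preprocessed data.
def _pre_fit_example():
    rng = np.random.RandomState(0)
    X, y = rng.rand(10, 3), rng.rand(10)
    X_c, y_c, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit(
        X, y, Xy=None, precompute='auto', normalize=False,
        fit_intercept=True, copy=True)
    assert np.allclose(precompute, np.dot(X_c.T, X_c))
    assert np.allclose(Xy, np.dot(X_c.T, y_c))
    return precompute, Xy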
|
bsd-3-clause
|
phev8/dataset_tools
|
playground/scene_rec_location_test.py
|
1
|
6514
|
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from datetime import datetime
from experiment_handler.time_synchronisation import convert_timestamps
from experiment_handler.label_data_reader import read_experiment_phases, read_location_labels
from feature_calculations.colocation.common import get_location_of_persons_at_samples
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import precision_recall_fscore_support, confusion_matrix
def read_roi_scene_recognitions_for_person(exp_root, person_id):
filepath = os.path.join(exp_root, "processed_data", "scene_recognition_results", person_id + "_et_scene_classes.pkl")
data = pd.read_pickle(filepath)
return data
def generate_sample_times(start, end, step):
return np.arange(start, end, step)
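# Illustrative sketch (invented numbers): sampling every 3 seconds over a short
# window, mirroring how the function is used further below.
def _example_sample_times():
    return generate_sample_times(120.0, 135.0, 3.0)  # -> [120., 123., 126., 129., 132.]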
def create_training_matrices(experiment, sample_times):
"""
Create a training matrix (including sample time, feature vector and label) for each person
Parameters
----------
experiment: str
Path to the root of the selected experiment
sample_times: array like
List of times where the samples should be generated.
Returns
-------
data: dict
with keys: person IDs and values: numpy array with columns [t, beacon_detection_vector, location_label]
"""
# Labels:
location_labels = read_location_labels(experiment)
locations = get_location_of_persons_at_samples(location_labels, sample_times, experiment)
p1_scene_features = read_roi_scene_recognitions_for_person(experiment, "P1")
p2_scene_features = read_roi_scene_recognitions_for_person(experiment, "P2")
p3_scene_features = read_roi_scene_recognitions_for_person(experiment, "P3")
p4_scene_features = read_roi_scene_recognitions_for_person(experiment, "P4")
print(p1_scene_features.loc[0]['predictions'].shape[0])
print(len(p1_scene_features['predictions'][0]))
data = {
"P1": np.zeros((len(sample_times), 2 + p1_scene_features.loc[0]['predictions'].shape[0])),
"P2": np.zeros((len(sample_times), 2 + p1_scene_features.loc[0]['predictions'].shape[0])),
"P3": np.zeros((len(sample_times), 2 + p1_scene_features.loc[0]['predictions'].shape[0])),
"P4": np.zeros((len(sample_times), 2 + p1_scene_features.loc[0]['predictions'].shape[0]))
}
for index, loc_label in enumerate(locations):
t = loc_label[0]
t_et = convert_timestamps(exp_root, t, "video", "P1_eyetracker")
p1 = np.mean(p1_scene_features[p1_scene_features['timestamp'].between(t_et - sample_step/2, t_et + sample_step/2)]['predictions'].as_matrix(), axis=0)
t_et = convert_timestamps(exp_root, t, "video", "P2_eyetracker")
p2 = np.mean(
p2_scene_features[p2_scene_features['timestamp'].between(t_et - sample_step / 2, t_et + sample_step / 2)][
'predictions'].as_matrix(), axis=0)
t_et = convert_timestamps(exp_root, t, "video", "P3_eyetracker")
p3 = np.mean(
p3_scene_features[p3_scene_features['timestamp'].between(t_et - sample_step / 2, t_et + sample_step / 2)][
'predictions'].as_matrix(), axis=0)
t_et = convert_timestamps(exp_root, t, "video", "P4_eyetracker")
p4 = np.mean(
p4_scene_features[p4_scene_features['timestamp'].between(t_et - sample_step / 2, t_et + sample_step / 2)][
'predictions'].as_matrix(), axis=0)
data["P1"][index, 0] = t
data["P1"][index, 1:-1] = p1
data["P1"][index, -1] = loc_label[1]
data["P2"][index, 0] = t
data["P2"][index, 1:-1] = p2
data["P2"][index, -1] = loc_label[2]
data["P3"][index, 0] = t
data["P3"][index, 1:-1] = p3
data["P3"][index, -1] = loc_label[3]
data["P4"][index, 0] = t
data["P4"][index, 1:-1] = p4
data["P4"][index, -1] = loc_label[4]
return data
# TODO: test classifiers for location detections using leave one person out for one experiment
def test_location_detection_with_one_experiment(experiment_root, sample_distance):
phases = read_experiment_phases(experiment_root)
times = generate_sample_times(phases['assembly'][0], phases['disassembly'][1], sample_distance)
data_matrices = create_training_matrices(experiment_root, times)
person_list = ["P1", "P2", "P3", "P4"]
scores = []
for for_training in person_list:
X_train = data_matrices[for_training][:, 1:-1]
y_train = data_matrices[for_training][:, -1]
y_train[y_train == 5] = 4
X_test = np.zeros((0, X_train.shape[1]))
y_test = np.zeros((0))
for p in person_list:
if p == for_training:
continue
X_test = np.append(X_test, data_matrices[p][:, 1:-1], axis=0)
y_test = np.append(y_test, data_matrices[p][:, -1], axis=0)
y_test[y_test == 5] = 4
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
score = precision_recall_fscore_support(y_test, y_pred, average='weighted')
print(for_training, score)
scores.append(score)
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print(cnf_matrix)
print("----------")
scores = np.array(scores)
print(np.mean(scores[:, :-1], axis=0))
# TODO: confusion matrix
# event plot if trained for one person
X_train = data_matrices["P1"][:, 1:-1]
y_train = data_matrices["P1"][:, -1]
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
f, axarr = plt.subplots(4, sharex=True, figsize=(16, 10))
for idx, test_for in enumerate(person_list):
t_test = data_matrices[test_for][:, 0]
y_test = data_matrices[test_for][:, -1]
y_pred = clf.predict(data_matrices[test_for][:, 1:-1])
axarr[idx].plot(t_test, y_test, 'o', label="Ground truth")
axarr[idx].plot(t_test, y_pred, 'x', label="Detection")
axarr[idx].grid()
axarr[idx].legend()
axarr[idx].set_title(test_for + " locations")
axarr[idx].set_ylabel("Location id")
plt.xlabel("Time [s]")
plt.show()
if __name__ == '__main__':
exp_root = "/Volumes/DataDrive/igroups_recordings/igroups_experiment_8"
sample_step = 3.0
test_location_detection_with_one_experiment(exp_root, sample_step)
|
mit
|
m860/data-analysis-with-python
|
filter/Stock.py
|
1
|
2079
|
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
import os
from dateutil.parser import parse
def _macd(closes):
ema12 = _ema(closes, 12)
ema26 = _ema(closes, 26)
diff = ema12 - ema26
dea = _ema(diff, 9)
osc = diff - dea
return (osc * 2, diff, dea)
def _ma(closes, cycle=5):
result = np.zeros(cycle)
for i in np.arange(cycle, closes.size):
result = np.append(result, [np.mean(closes[i - cycle:i])])
return result
def _ema(closes, cycle=12):
if closes.size <= 0:
return np.array([])
a = 2 / np.float64((cycle + 1))
ema0 = closes[0]
result = np.array([ema0])
def curema(index, value):
return result[index - 1] + a * (value - result[index - 1])
for i in np.arange(1, closes.size):
result = np.append(result, [curema(i, closes[i])])
return result
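# Illustrative sketch (invented closes): the EMA recursion above uses the
# smoothing factor a = 2 / (cycle + 1), so cycle=3 gives a = 0.5.
def _ema_example():
    closes = np.array([10.0, 11.0, 12.0, 11.5, 12.5])
    ema3 = _ema(closes, cycle=3)
    # ema3[0] == closes[0]; ema3[i] == ema3[i-1] + 0.5 * (closes[i] - ema3[i-1])
    osc, diff, dea = _macd(closes)
    return ema3, osc, diff, dea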
def splicsvpath(csvpath):
bn = os.path.basename(csvpath)
filename, ext = os.path.splitext(bn)
return (filename[2:], filename[:2])
class Stock:
def __init__(self, csvpath, cal=True):
self.symbol, self.code = splicsvpath(csvpath)
self.datas = [{
'date': parse(d[1]).date(),
'open': np.float64(d[2]),
'high': np.float64(d[3]),
'low': np.float64(d[4]),
'close': np.float64(d[5]),
'volume': np.float64(d[6])
} for d in pd.read_csv(csvpath).as_matrix()]
if cal:
closes = np.array([d['close'] for d in self.datas])
self.macd, self.div, self.dea = _macd(closes)
self.em5 = _ma(closes, 5)
self.em10 = _ma(closes, 10)
self.em20 = _ma(closes, 20)
self.em30 = _ma(closes, 30)
self.em60 = _ma(closes, 60)
self.ema5 = _ema(closes, 5)
self.ema10 = _ema(closes, 10)
self.ema20 = _ema(closes, 20)
self.ema60 = _ema(closes, 60)
def length(self):
return len(self.datas)
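# Illustrative sketch (hypothetical path): the CSV is expected to carry the
# date in column 1 and open/high/low/close/volume in columns 2-6, as assumed
# by Stock.__init__ above.
def _stock_example(csvpath='data/sh600000.csv'):
    stock = Stock(csvpath, cal=True)
    return stock.symbol, stock.code, stock.length(), stock.macd[-1]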
|
mit
|
thesuperzapper/tensorflow
|
tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py
|
62
|
9268
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
return {
key: val.tolist() if isinstance(val, np.ndarray) else val
for key, val in a.items()
}
class _FeedingFunctionsTestCase(test.TestCase):
"""Tests for feeding functions."""
def testArrayFeedFnBatchOne(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 16
expected = {
"index_placeholder": [i],
"value_placeholder": [[2 * i, 2 * i + 1]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchFive(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [15, 0, 1, 2, 3],
"value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchTwoWithOneEpoch(self):
array = np.arange(5) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"value_placeholder": [10, 11]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"value_placeholder": [12, 13]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"value_placeholder": [14]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundred(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 100)
expected = {
"index_placeholder":
list(range(0, 16)) * 6 + list(range(0, 4)),
"value_placeholder":
np.arange(32).reshape([16, 2]).tolist() * 6 +
[[0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundredWithSmallerArrayAndMultipleEpochs(self):
array = np.arange(2) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"value_placeholder": [10, 11, 10, 11],
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOne(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 32
expected = {
"index_placeholder": [i + 96],
"a_placeholder": [32 + i],
"b_placeholder": [64 + i]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchFive(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [127, 96, 97, 98, 99],
"a_placeholder": [63, 32, 33, 34, 35],
"b_placeholder": [95, 64, 65, 66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchTwoWithOneEpoch(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 37)
array2 = np.arange(64, 69)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 101))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [96, 97],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [98, 99],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [100],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundred(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 100)
expected = {
"index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
"a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
"b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundredWithSmallDataArrayAndMultipleEpochs(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 34)
array2 = np.arange(64, 66)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 98))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [96, 97, 96, 97],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnBatchTwoWithOneEpoch(self):
a = np.arange(32, 37)
b = np.arange(64, 69)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnLargeBatchWithSmallArrayAndMultipleEpochs(self):
a = np.arange(32, 34)
b = np.arange(64, 66)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
rbharath/deepchem
|
deepchem/molnet/run_benchmark_models.py
|
1
|
30665
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 6 23:41:26 2017
@author: zqwu
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
import deepchem
from deepchem.utils.dependencies import xgboost
from deepchem.molnet.preset_hyper_parameters import hps
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
def benchmark_classification(train_dataset,
valid_dataset,
test_dataset,
tasks,
transformers,
n_features,
metric,
model,
test=False,
hyper_parameters=None,
seed=123):
"""
Calculate performance of different models on the specific dataset & tasks
Parameters
----------
train_dataset: dataset struct
dataset used for model training and evaluation
valid_dataset: dataset struct
dataset only used for model evaluation (and hyperparameter tuning)
test_dataset: dataset struct
dataset only used for model evaluation
tasks: list of string
list of targets(tasks, datasets)
transformers: dc.trans.Transformer struct
transformer used for model evaluation
n_features: integer
number of features, or length of binary fingerprints
metric: list of dc.metrics.Metric objects
metrics used for evaluation
model: string, optional (default='tf')
choice of which model to use, should be: rf, tf, tf_robust, logreg,
irv, graphconv, dag, xgb, weave
test: boolean
whether to calculate test_set performance
hyper_parameters: dict, optional (default=None)
hyper parameters for designated model, None = use preset values
Returns
-------
train_scores : dict
predicting results(AUC) on training set
valid_scores : dict
predicting results(AUC) on valid set
test_scores : dict
predicting results(AUC) on test set
"""
train_scores = {}
valid_scores = {}
test_scores = {}
assert model in [
'rf', 'tf', 'tf_robust', 'logreg', 'irv', 'graphconv', 'dag', 'xgb',
'weave'
]
if hyper_parameters is None:
hyper_parameters = hps[model]
model_name = model
if model_name == 'tf':
# Loading hyper parameters
layer_sizes = hyper_parameters['layer_sizes']
weight_init_stddevs = hyper_parameters['weight_init_stddevs']
bias_init_consts = hyper_parameters['bias_init_consts']
dropouts = hyper_parameters['dropouts']
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
# Building tensorflow MultiTaskDNN model
model = deepchem.models.TensorflowMultiTaskClassifier(
len(tasks),
n_features,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
dropouts=dropouts,
penalty=penalty,
penalty_type=penalty_type,
batch_size=batch_size,
learning_rate=learning_rate,
seed=seed)
elif model_name == 'tf_robust':
# Loading hyper parameters
layer_sizes = hyper_parameters['layer_sizes']
weight_init_stddevs = hyper_parameters['weight_init_stddevs']
bias_init_consts = hyper_parameters['bias_init_consts']
dropouts = hyper_parameters['dropouts']
bypass_layer_sizes = hyper_parameters['bypass_layer_sizes']
bypass_weight_init_stddevs = hyper_parameters['bypass_weight_init_stddevs']
bypass_bias_init_consts = hyper_parameters['bypass_bias_init_consts']
bypass_dropouts = hyper_parameters['bypass_dropouts']
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
# Building tensorflow robust MultiTaskDNN model
model = deepchem.models.RobustMultitaskClassifier(
len(tasks),
n_features,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
dropouts=dropouts,
bypass_layer_sizes=bypass_layer_sizes,
bypass_weight_init_stddevs=bypass_weight_init_stddevs,
bypass_bias_init_consts=bypass_bias_init_consts,
bypass_dropouts=bypass_dropouts,
penalty=penalty,
penalty_type=penalty_type,
batch_size=batch_size,
learning_rate=learning_rate,
seed=seed)
elif model_name == 'logreg':
# Loading hyper parameters
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
# Building tensorflow logistic regression model
model = deepchem.models.TensorflowLogisticRegression(
len(tasks),
n_features,
penalty=penalty,
penalty_type=penalty_type,
batch_size=batch_size,
learning_rate=learning_rate,
seed=seed)
elif model_name == 'irv':
# Loading hyper parameters
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_K = hyper_parameters['n_K']
# Transform fingerprints to IRV features
transformer = deepchem.trans.IRVTransformer(n_K, len(tasks), train_dataset)
train_dataset = transformer.transform(train_dataset)
valid_dataset = transformer.transform(valid_dataset)
if test:
test_dataset = transformer.transform(test_dataset)
# Building tensorflow IRV model
model = deepchem.models.TensorflowMultiTaskIRVClassifier(
len(tasks),
K=n_K,
penalty=penalty,
penalty_type=penalty_type,
batch_size=batch_size,
learning_rate=learning_rate,
seed=seed)
elif model_name == 'graphconv':
# Loading hyper parameters
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_filters = hyper_parameters['n_filters']
n_fully_connected_nodes = hyper_parameters['n_fully_connected_nodes']
tf.set_random_seed(seed)
graph_model = deepchem.nn.SequentialGraph(n_features)
graph_model.add(
deepchem.nn.GraphConv(int(n_filters), n_features, activation='relu'))
graph_model.add(deepchem.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(deepchem.nn.GraphPool())
graph_model.add(
deepchem.nn.GraphConv(
int(n_filters), int(n_filters), activation='relu'))
graph_model.add(deepchem.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(deepchem.nn.GraphPool())
# Gather Projection
graph_model.add(
deepchem.nn.Dense(
int(n_fully_connected_nodes), int(n_filters), activation='relu'))
graph_model.add(deepchem.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(deepchem.nn.GraphGather(batch_size, activation="tanh"))
model = deepchem.models.MultitaskGraphClassifier(
graph_model,
len(tasks),
n_features,
batch_size=batch_size,
learning_rate=learning_rate,
optimizer_type="adam",
beta1=.9,
beta2=.999)
elif model_name == 'dag':
# Loading hyper parameters
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_graph_feat = hyper_parameters['n_graph_feat']
default_max_atoms = hyper_parameters['default_max_atoms']
max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test])
max_atoms = min([max_atoms, default_max_atoms])
print('Maximum number of atoms: %i' % max_atoms)
reshard_size = 256
transformer = deepchem.trans.DAGTransformer(max_atoms=max_atoms)
train_dataset.reshard(reshard_size)
train_dataset = transformer.transform(train_dataset)
valid_dataset.reshard(reshard_size)
valid_dataset = transformer.transform(valid_dataset)
if test:
test_dataset.reshard(reshard_size)
test_dataset = transformer.transform(test_dataset)
tf.set_random_seed(seed)
graph_model = deepchem.nn.SequentialDAGGraph(
n_features, max_atoms=max_atoms)
graph_model.add(
deepchem.nn.DAGLayer(
n_graph_feat,
n_features,
max_atoms=max_atoms,
batch_size=batch_size))
graph_model.add(deepchem.nn.DAGGather(n_graph_feat, max_atoms=max_atoms))
model = deepchem.models.MultitaskGraphClassifier(
graph_model,
len(tasks),
n_features,
batch_size=batch_size,
learning_rate=learning_rate,
optimizer_type="adam",
beta1=.9,
beta2=.999)
elif model_name == 'weave':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_graph_feat = hyper_parameters['n_graph_feat']
n_pair_feat = hyper_parameters['n_pair_feat']
max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test])
tf.set_random_seed(seed)
graph_model = deepchem.nn.AlternateSequentialWeaveGraph(
batch_size,
max_atoms=max_atoms,
n_atom_feat=n_features,
n_pair_feat=n_pair_feat)
graph_model.add(deepchem.nn.AlternateWeaveLayer(max_atoms, 75, 14))
graph_model.add(
deepchem.nn.AlternateWeaveLayer(max_atoms, 50, 50, update_pair=False))
graph_model.add(deepchem.nn.Dense(n_graph_feat, 50, activation='tanh'))
graph_model.add(deepchem.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(
deepchem.nn.AlternateWeaveGather(
batch_size, n_input=n_graph_feat, gaussian_expand=True))
model = deepchem.models.MultitaskGraphClassifier(
graph_model,
len(tasks),
n_features,
batch_size=batch_size,
learning_rate=learning_rate,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
elif model_name == 'rf':
# Loading hyper parameters
n_estimators = hyper_parameters['n_estimators']
nb_epoch = None
# Building scikit random forest model
def model_builder(model_dir_rf):
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=n_estimators, n_jobs=-1)
return deepchem.models.sklearn_models.SklearnModel(sklearn_model,
model_dir_rf)
model = deepchem.models.multitask.SingletaskToMultitask(tasks,
model_builder)
elif model_name == 'xgb':
# Loading hyper parameters
max_depth = hyper_parameters['max_depth']
learning_rate = hyper_parameters['learning_rate']
n_estimators = hyper_parameters['n_estimators']
gamma = hyper_parameters['gamma']
min_child_weight = hyper_parameters['min_child_weight']
max_delta_step = hyper_parameters['max_delta_step']
subsample = hyper_parameters['subsample']
colsample_bytree = hyper_parameters['colsample_bytree']
colsample_bylevel = hyper_parameters['colsample_bylevel']
reg_alpha = hyper_parameters['reg_alpha']
reg_lambda = hyper_parameters['reg_lambda']
scale_pos_weight = hyper_parameters['scale_pos_weight']
base_score = hyper_parameters['base_score']
seed = hyper_parameters['seed']
early_stopping_rounds = hyper_parameters['early_stopping_rounds']
nb_epoch = None
esr = {'early_stopping_rounds': early_stopping_rounds}
# Building xgboost classification model
def model_builder(model_dir_xgb):
xgboost_model = xgboost.XGBClassifier(
max_depth=max_depth,
learning_rate=learning_rate,
n_estimators=n_estimators,
gamma=gamma,
min_child_weight=min_child_weight,
max_delta_step=max_delta_step,
subsample=subsample,
colsample_bytree=colsample_bytree,
colsample_bylevel=colsample_bylevel,
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
scale_pos_weight=scale_pos_weight,
base_score=base_score,
seed=seed)
return deepchem.models.xgboost_models.XGBoostModel(xgboost_model,
model_dir_xgb, **esr)
model = deepchem.models.multitask.SingletaskToMultitask(tasks,
model_builder)
if nb_epoch is None:
model.fit(train_dataset)
else:
model.fit(train_dataset, nb_epoch=nb_epoch)
train_scores[model_name] = model.evaluate(train_dataset, metric, transformers)
valid_scores[model_name] = model.evaluate(valid_dataset, metric, transformers)
if test:
test_scores[model_name] = model.evaluate(test_dataset, metric, transformers)
return train_scores, valid_scores, test_scores
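# Illustrative sketch (not part of the original file): wiring the function
# above to a MoleculeNet loader. The load_tox21 call and its return layout are
# assumed to follow the deepchem.molnet API of this era; adjust if they differ.
def _example_benchmark_rf():
  tasks, (train, valid, test), transformers = deepchem.molnet.load_tox21(
      featurizer='ECFP', split='index')
  metric = [deepchem.metrics.Metric(deepchem.metrics.roc_auc_score, np.mean)]
  return benchmark_classification(
      train, valid, test, tasks, transformers, n_features=1024,
      metric=metric, model='rf', test=True)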
def benchmark_regression(train_dataset,
valid_dataset,
test_dataset,
tasks,
transformers,
n_features,
metric,
model,
test=False,
hyper_parameters=None,
seed=123):
"""
Calculate performance of different models on the specific dataset & tasks
Parameters
----------
train_dataset: dataset struct
dataset used for model training and evaluation
valid_dataset: dataset struct
dataset only used for model evaluation (and hyperparameter tuning)
test_dataset: dataset struct
dataset only used for model evaluation
tasks: list of string
list of targets(tasks, datasets)
transformers: dc.trans.Transformer struct
transformer used for model evaluation
n_features: integer
number of features, or length of binary fingerprints
metric: list of dc.metrics.Metric objects
metrics used for evaluation
model: string, optional (default='tf_regression')
choice of which model to use, should be: tf_regression, tf_regression_ft,
graphconvreg, rf_regression, dtnn, dag_regression, xgb_regression,
weave_regression
test: boolean
whether to calculate test_set performance
hyper_parameters: dict, optional (default=None)
hyper parameters for designated model, None = use preset values
Returns
-------
train_scores : dict
predicting results (chosen metrics) on training set
valid_scores : dict
predicting results (chosen metrics) on valid set
test_scores : dict
predicting results (chosen metrics) on test set
"""
train_scores = {}
valid_scores = {}
test_scores = {}
assert model in [
'tf_regression', 'tf_regression_ft', 'rf_regression', 'graphconvreg',
'dtnn', 'dag_regression', 'xgb_regression', 'weave_regression'
]
if hyper_parameters is None:
hyper_parameters = hps[model]
model_name = model
if model_name == 'tf_regression':
# Loading hyper parameters
layer_sizes = hyper_parameters['layer_sizes']
weight_init_stddevs = hyper_parameters['weight_init_stddevs']
bias_init_consts = hyper_parameters['bias_init_consts']
dropouts = hyper_parameters['dropouts']
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
model = deepchem.models.TensorflowMultiTaskRegressor(
len(tasks),
n_features,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
dropouts=dropouts,
penalty=penalty,
penalty_type=penalty_type,
batch_size=batch_size,
learning_rate=learning_rate,
seed=seed)
# Building tensorflow MultiTaskDNN model
elif model_name == 'tf_regression_ft':
# Loading hyper parameters
layer_sizes = hyper_parameters['layer_sizes']
weight_init_stddevs = hyper_parameters['weight_init_stddevs']
bias_init_consts = hyper_parameters['bias_init_consts']
dropouts = hyper_parameters['dropouts']
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
fit_transformers = [hyper_parameters['fit_transformers'](train_dataset)]
model = deepchem.models.TensorflowMultiTaskFitTransformRegressor(
n_tasks=len(tasks),
n_features=n_features,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
dropouts=dropouts,
penalty=penalty,
penalty_type=penalty_type,
batch_size=batch_size,
learning_rate=learning_rate,
fit_transformers=fit_transformers,
n_eval=10,
seed=seed)
elif model_name == 'graphconvreg':
# Initialize model folder
# Loading hyper parameters
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_filters = hyper_parameters['n_filters']
n_fully_connected_nodes = hyper_parameters['n_fully_connected_nodes']
tf.set_random_seed(seed)
graph_model = deepchem.nn.SequentialGraph(n_features)
graph_model.add(
deepchem.nn.GraphConv(int(n_filters), n_features, activation='relu'))
graph_model.add(deepchem.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(deepchem.nn.GraphPool())
graph_model.add(
deepchem.nn.GraphConv(
int(n_filters), int(n_filters), activation='relu'))
graph_model.add(deepchem.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(deepchem.nn.GraphPool())
# Gather Projection
graph_model.add(
deepchem.nn.Dense(
int(n_fully_connected_nodes), int(n_filters), activation='relu'))
graph_model.add(deepchem.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(deepchem.nn.GraphGather(batch_size, activation="tanh"))
model = deepchem.models.MultitaskGraphRegressor(
graph_model,
len(tasks),
n_features,
batch_size=batch_size,
learning_rate=learning_rate,
optimizer_type="adam",
beta1=.9,
beta2=.999)
elif model_name == 'dtnn':
# Loading hyper parameters
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_embedding = hyper_parameters['n_embedding']
n_distance = hyper_parameters['n_distance']
assert len(n_features) == 2, 'DTNN is only applicable to qm datasets'
tf.set_random_seed(seed)
graph_model = deepchem.nn.SequentialDTNNGraph(n_distance=n_distance)
graph_model.add(deepchem.nn.DTNNEmbedding(n_embedding=n_embedding))
graph_model.add(
deepchem.nn.DTNNStep(n_embedding=n_embedding, n_distance=n_distance))
graph_model.add(
deepchem.nn.DTNNStep(n_embedding=n_embedding, n_distance=n_distance))
graph_model.add(deepchem.nn.DTNNGather(n_embedding=n_embedding))
model = deepchem.models.MultitaskGraphRegressor(
graph_model,
len(tasks),
n_embedding,
batch_size=batch_size,
learning_rate=learning_rate,
optimizer_type="adam",
beta1=.9,
beta2=.999)
elif model_name == 'dag_regression':
# Loading hyper parameters
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_graph_feat = hyper_parameters['n_graph_feat']
default_max_atoms = hyper_parameters['default_max_atoms']
max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test])
max_atoms = min([max_atoms, default_max_atoms])
print('Maximum number of atoms: %i' % max_atoms)
reshard_size = 512
transformer = deepchem.trans.DAGTransformer(max_atoms=max_atoms)
train_dataset.reshard(reshard_size)
train_dataset = transformer.transform(train_dataset)
valid_dataset.reshard(reshard_size)
valid_dataset = transformer.transform(valid_dataset)
if test:
test_dataset.reshard(reshard_size)
test_dataset = transformer.transform(test_dataset)
tf.set_random_seed(seed)
graph_model = deepchem.nn.SequentialDAGGraph(
n_features, max_atoms=max_atoms)
graph_model.add(
deepchem.nn.DAGLayer(
n_graph_feat,
n_features,
max_atoms=max_atoms,
batch_size=batch_size))
graph_model.add(deepchem.nn.DAGGather(n_graph_feat, max_atoms=max_atoms))
model = deepchem.models.MultitaskGraphRegressor(
graph_model,
len(tasks),
n_features,
batch_size=batch_size,
learning_rate=learning_rate,
optimizer_type="adam",
beta1=.9,
beta2=.999)
elif model_name == 'weave_regression':
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_graph_feat = hyper_parameters['n_graph_feat']
n_pair_feat = hyper_parameters['n_pair_feat']
max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test])
tf.set_random_seed(seed)
graph_model = deepchem.nn.AlternateSequentialWeaveGraph(
batch_size,
max_atoms=max_atoms,
n_atom_feat=n_features,
n_pair_feat=n_pair_feat)
graph_model.add(deepchem.nn.AlternateWeaveLayer(max_atoms, 75, 14))
graph_model.add(
deepchem.nn.AlternateWeaveLayer(max_atoms, 50, 50, update_pair=False))
graph_model.add(deepchem.nn.Dense(n_graph_feat, 50, activation='tanh'))
graph_model.add(deepchem.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(
deepchem.nn.AlternateWeaveGather(
batch_size, n_input=n_graph_feat, gaussian_expand=True))
model = deepchem.models.MultitaskGraphRegressor(
graph_model,
len(tasks),
n_features,
batch_size=batch_size,
learning_rate=learning_rate,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
elif model_name == 'rf_regression':
# Loading hyper parameters
n_estimators = hyper_parameters['n_estimators']
nb_epoch = None
# Building scikit random forest model
def model_builder(model_dir_rf_regression):
sklearn_model = RandomForestRegressor(
n_estimators=n_estimators, n_jobs=-1)
return deepchem.models.sklearn_models.SklearnModel(
sklearn_model, model_dir_rf_regression)
model = deepchem.models.multitask.SingletaskToMultitask(tasks,
model_builder)
elif model_name == 'xgb_regression':
# Loading hyper parameters
max_depth = hyper_parameters['max_depth']
learning_rate = hyper_parameters['learning_rate']
n_estimators = hyper_parameters['n_estimators']
gamma = hyper_parameters['gamma']
min_child_weight = hyper_parameters['min_child_weight']
max_delta_step = hyper_parameters['max_delta_step']
subsample = hyper_parameters['subsample']
colsample_bytree = hyper_parameters['colsample_bytree']
colsample_bylevel = hyper_parameters['colsample_bylevel']
reg_alpha = hyper_parameters['reg_alpha']
reg_lambda = hyper_parameters['reg_lambda']
scale_pos_weight = hyper_parameters['scale_pos_weight']
base_score = hyper_parameters['base_score']
seed = hyper_parameters['seed']
early_stopping_rounds = hyper_parameters['early_stopping_rounds']
nb_epoch = None
esr = {'early_stopping_rounds': early_stopping_rounds}
# Building xgboost regression model
def model_builder(model_dir_xgb):
xgboost_model = xgboost.XGBRegressor(
max_depth=max_depth,
learning_rate=learning_rate,
n_estimators=n_estimators,
gamma=gamma,
min_child_weight=min_child_weight,
max_delta_step=max_delta_step,
subsample=subsample,
colsample_bytree=colsample_bytree,
colsample_bylevel=colsample_bylevel,
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
scale_pos_weight=scale_pos_weight,
base_score=base_score,
seed=seed)
return deepchem.models.xgboost_models.XGBoostModel(xgboost_model,
model_dir_xgb, **esr)
model = deepchem.models.multitask.SingletaskToMultitask(tasks,
model_builder)
print('-----------------------------')
print('Start fitting: %s' % model_name)
if nb_epoch is None:
model.fit(train_dataset)
else:
model.fit(train_dataset, nb_epoch=nb_epoch)
train_scores[model_name] = model.evaluate(train_dataset, metric, transformers)
valid_scores[model_name] = model.evaluate(valid_dataset, metric, transformers)
if test:
test_scores[model_name] = model.evaluate(test_dataset, metric, transformers)
return train_scores, valid_scores, test_scores
def low_data_benchmark_classification(train_dataset,
valid_dataset,
n_features,
metric,
model='siamese',
hyper_parameters=None,
seed=123):
"""
Calculate low data benchmark performance
Parameters
----------
train_dataset : dataset struct
loaded dataset, ConvMol struct, used for training
valid_dataset : dataset struct
loaded dataset, ConvMol struct, used for validation
n_features : integer
number of features, or length of binary fingerprints
metric: list of dc.metrics.Metric objects
metrics used for evaluation
model : string, optional (default='siamese')
choice of which model to use, should be: siamese, attn, res
hyper_parameters: dict, optional (default=None)
hyper parameters for designated model, None = use preset values
Returns
-------
valid_scores : dict
predicting results(AUC) on valid set
"""
train_scores = {} # train set not evaluated in low data model
valid_scores = {}
assert model in ['siamese', 'attn', 'res']
if hyper_parameters is None:
hyper_parameters = hps[model]
# Loading hyperparameters
# num positive/negative ligands
n_pos = hyper_parameters['n_pos']
n_neg = hyper_parameters['n_neg']
# Set batch sizes for network
test_batch_size = hyper_parameters['test_batch_size']
support_batch_size = n_pos + n_neg
# Model structure
n_filters = hyper_parameters['n_filters']
n_fully_connected_nodes = hyper_parameters['n_fully_connected_nodes']
  # Training settings
nb_epochs = hyper_parameters['nb_epochs']
n_train_trials = hyper_parameters['n_train_trials']
n_eval_trials = hyper_parameters['n_eval_trials']
learning_rate = hyper_parameters['learning_rate']
tf.set_random_seed(seed)
support_graph = deepchem.nn.SequentialSupportGraph(n_features)
prev_features = n_features
for count, n_filter in enumerate(n_filters):
support_graph.add(
deepchem.nn.GraphConv(int(n_filter), prev_features, activation='relu'))
support_graph.add(deepchem.nn.GraphPool())
prev_features = int(n_filter)
for count, n_fcnode in enumerate(n_fully_connected_nodes):
support_graph.add(
deepchem.nn.Dense(int(n_fcnode), prev_features, activation='tanh'))
prev_features = int(n_fcnode)
support_graph.add_test(
deepchem.nn.GraphGather(test_batch_size, activation='tanh'))
support_graph.add_support(
deepchem.nn.GraphGather(support_batch_size, activation='tanh'))
if model in ['siamese']:
pass
elif model in ['attn']:
max_depth = hyper_parameters['max_depth']
support_graph.join(
deepchem.nn.AttnLSTMEmbedding(test_batch_size, support_batch_size,
prev_features, max_depth))
elif model in ['res']:
max_depth = hyper_parameters['max_depth']
support_graph.join(
deepchem.nn.ResiLSTMEmbedding(test_batch_size, support_batch_size,
prev_features, max_depth))
model_low_data = deepchem.models.SupportGraphClassifier(
support_graph,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=learning_rate)
print('-------------------------------------')
print('Start fitting by low data model: ' + model)
# Fit trained model
model_low_data.fit(
train_dataset,
nb_epochs=nb_epochs,
n_episodes_per_epoch=n_train_trials,
n_pos=n_pos,
n_neg=n_neg,
log_every_n_samples=50)
# Evaluating low data model
valid_scores[model] = model_low_data.evaluate(
valid_dataset, metric, n_pos, n_neg, n_trials=n_eval_trials)
return valid_scores
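# A hedged sketch (illustrative values, not tuned settings) of the hyper-parameter
# dictionary read by ``low_data_benchmark_classification`` for the 'siamese', 'attn'
# and 'res' models; 'max_depth' is only consumed by the 'attn' and 'res' branches.
example_low_data_hyper_parameters = {
    'n_pos': 1,
    'n_neg': 1,
    'test_batch_size': 128,
    'n_filters': [64, 128, 64],
    'n_fully_connected_nodes': [128],
    'nb_epochs': 1,
    'n_train_trials': 2000,
    'n_eval_trials': 20,
    'learning_rate': 1e-4,
    'max_depth': 3,  # only used when model is 'attn' or 'res'
}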
|
mit
|
antgonza/qiime
|
scripts/categorized_dist_scatterplot.py
|
15
|
6299
|
#!/usr/bin/env python
# File created on 19 Jan 2011
from __future__ import division
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "[email protected]"
from qiime.util import make_option
import os
import warnings
warnings.filterwarnings('ignore', 'Not using MPI as mpi4py not found')
from qiime.parse import parse_distmat_to_dict, parse_mapping_file,\
mapping_file_to_dict
from qiime.util import parse_command_line_parameters
import numpy
import matplotlib.pyplot as plt
from qiime.categorized_dist_scatterplot import get_avg_dists, get_sam_ids
script_info = {}
script_info[
'brief_description'] = "Create a categorized distance scatterplot representing average distances between samples, broken down by categories"
script_info[
'script_description'] = "Create a figure representing average distances between samples, broken down by categories. I call it a 'categorized distance scatterplot'. See script usage for more details. The mapping file specifies the relevant data - if you have e.g. 'N/A' values or samples you don't want included, first use filter_samples_from_otu_table.py to remove unwanted samples from the mapping file, and thus the analysis. Note that the resulting plot will include only samples in both the mapping file AND the distance matrix."
script_info['script_usage'] = [(
"Canonical Example:",
"Split samples by country. Within each country compare each child to all adults. Plot the average distance from that child to all adults, vs. the age of that child",
"python categorized_dist_scatterplot.py -m map.txt -d unifrac_distance.txt -c Country -p AgeCategory:Child -s AgeCategory:Adult -a AgeYears -o fig1.png"),
("Example 2:",
"Same as above, but compares Child with all other categories (e.g.: NA, Infant, etc.)",
"python categorized_dist_scatterplot.py -m map.txt -d unifrac_distance.txt -c Country -p AgeCategory:Child -a AgeYears -o fig1.svg")]
script_info[
    'output_description'] = "a figure and the text data for that figure"
script_info['required_options'] = [
make_option('-m', '--map', type='existing_filepath',
help='mapping file'),
make_option('-d', '--distance_matrix', type='existing_filepath',
help='distance matrix'),
make_option('-p', '--primary_state', type='string',
help="Samples matching this state will be plotted. E.g.: AgeCategory:Child . See qiime's filter_samples_from_otu_table.py for more syntax options"),
make_option('-a', '--axis_category', type='string',
help='this will form the horizontal axis of the figure, e.g.: AgeYears . Must be numbers'),
make_option('-o', '--output_path', type='new_dirpath',
                help='output figure, filename extension determines format. E.g.: "fig1.png" or similar. A "fig1.txt" or similar will also be created with the data underlying the figure'),
]
script_info['optional_options'] = [
make_option('-c', '--colorby', type='string',
help='samples will first be separated by this column of the mapping file. They will be colored by this column of the mapping file, and all comparisons will be done only among samples with the same value in this column. e.g.: Country. You may omit -c, and the samples will not be separated'),
make_option('-s', '--secondary_state', type='string',
                help='all samples matching the primary state will be compared to samples matching this secondary state. E.g.: AgeCategory:Adult'),
]
script_info['version'] = __version__
def main():
option_parser, opts, args =\
parse_command_line_parameters(**script_info)
map_data, map_header, map_comments = parse_mapping_file(
open(opts.map, 'U'))
map_dict = mapping_file_to_dict(map_data, map_header)
distdict = parse_distmat_to_dict(open(opts.distance_matrix, 'U'))
if opts.colorby is None:
colorby_cats = [None]
else:
colorby_idx = map_header.index(opts.colorby)
colorby_cats = list(set([map_data[i][colorby_idx] for
i in range(len(map_data))]))
textfilename = os.path.splitext(opts.output_path)[0] + '.txt'
text_fh = open(textfilename, 'w')
text_fh.write(opts.axis_category + '\tdistance\tSampleID' + '\n')
colorby_cats.sort()
plt.figure()
for cat_num, cat in enumerate(colorby_cats):
# collect the primary and secondary samples within this category
state1_samids, state2_samids = get_sam_ids(map_data, map_header,
opts.colorby, cat, opts.primary_state, opts.secondary_state)
state1_samids =\
list(set(state1_samids).intersection(set(distdict.keys())))
state2_samids =\
list(set(state2_samids).intersection(set(distdict.keys())))
if state1_samids == [] or state2_samids == [] or \
(len(state1_samids) == 1 and state1_samids == state2_samids):
raise RuntimeError("one category of samples didn't have any valid" +
" distances. try eliminating samples from -p or -s, or changing" +
" your mapping file with filter_samples_from_otu_table.py")
# go through dmtx
state1_avg_dists = get_avg_dists(
state1_samids,
state2_samids,
distdict)
# plot
xvals = [float(map_dict[sam][opts.axis_category]) for
sam in state1_samids]
try:
color = plt.cm.jet(cat_num / (len(colorby_cats) - 1))
except ZeroDivisionError: # only one cat
color = 'b'
plt.scatter(xvals, state1_avg_dists, edgecolors=color, alpha=.5,
facecolors='none')
plt.xlabel(opts.axis_category)
plt.ylabel('average distance')
lines = [str(xvals[i]) + '\t' + str(state1_avg_dists[i]) +
'\t' + state1_samids[i] + '\n' for i in range(len(xvals))]
text_fh.writelines(lines)
if opts.colorby is not None:
plt.legend(colorby_cats)
plt.savefig(opts.output_path)
if __name__ == "__main__":
main()
|
gpl-2.0
|
dennisobrien/bokeh
|
bokeh/core/property/bases.py
|
2
|
15482
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide base classes for the Bokeh property system.
.. note::
These classes form part of the very low-level machinery that implements
the Bokeh model and property system. It is unlikely that any of these
classes or their methods will be applicable to any standard usage or to
anyone who is not directly developing on Bokeh's own infrastructure.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from copy import copy
import types
# External imports
from six import string_types
import numpy as np
# Bokeh imports
from ...util.dependencies import import_optional
from ...util.string import nice_join
from ..has_props import HasProps
from .descriptor_factory import PropertyDescriptorFactory
from .descriptors import BasicPropertyDescriptor
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
pd = import_optional('pandas')
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
__all__ = (
'ContainerProperty',
'DeserializationError',
'PrimitiveProperty',
'Property',
'validation_on',
)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class DeserializationError(Exception):
pass
class Property(PropertyDescriptorFactory):
''' Base class for Bokeh property instances, which can be added to Bokeh
Models.
Args:
default (obj or None, optional) :
A default value for attributes created from this property to
have (default: None)
help (str or None, optional) :
A documentation string for this property. It will be automatically
used by the :ref:`bokeh.sphinxext.bokeh_prop` extension when
            generating Sphinx documentation. (default: None)
serialized (bool, optional) :
Whether attributes created from this property should be included
in serialization (default: True)
readonly (bool, optional) :
Whether attributes created from this property are read-only.
(default: False)
'''
# This class attribute is controlled by external helper API for validation
_should_validate = True
def __init__(self, default=None, help=None, serialized=True, readonly=False):
# This is how the descriptor is created in the class declaration.
self._serialized = False if readonly else serialized
self._readonly = readonly
self._default = default
self.__doc__ = help
self.alternatives = []
self.assertions = []
def __str__(self):
return self.__class__.__name__
@classmethod
def _sphinx_prop_link(cls):
''' Generate a sphinx :class: link to this property.
'''
return ":class:`~bokeh.core.properties.%s` " % cls.__name__
@staticmethod
def _sphinx_model_link(name):
''' Generate a sphinx :class: link to given named model.
'''
return ":class:`~%s` " % name
def _sphinx_type(self):
''' Generate a Sphinx-style reference to this type for documentation
automation purposes.
'''
return self._sphinx_prop_link()
def make_descriptors(self, base_name):
''' Return a list of ``BasicPropertyDescriptor`` instances to install
on a class, in order to delegate attribute access to this property.
Args:
name (str) : the name of the property these descriptors are for
Returns:
list[BasicPropertyDescriptor]
The descriptors returned are collected by the ``MetaHasProps``
metaclass and added to ``HasProps`` subclasses during class creation.
'''
return [ BasicPropertyDescriptor(base_name, self) ]
def _may_have_unstable_default(self):
''' False if we have a default that is immutable, and will be the
same every time (some defaults are generated on demand by a function
to be called).
'''
return isinstance(self._default, types.FunctionType)
@classmethod
def _copy_default(cls, default):
''' Return a copy of the default, or a new value if the default
is specified by a function.
'''
if not isinstance(default, types.FunctionType):
return copy(default)
else:
return default()
def _raw_default(self):
''' Return the untransformed default value.
The raw_default() needs to be validated and transformed by
prepare_value() before use, and may also be replaced later by
subclass overrides or by themes.
'''
return self._copy_default(self._default)
def themed_default(self, cls, name, theme_overrides):
''' The default, transformed by prepare_value() and the theme overrides.
'''
overrides = theme_overrides
if overrides is None or name not in overrides:
overrides = cls._overridden_defaults()
if name in overrides:
default = self._copy_default(overrides[name])
else:
default = self._raw_default()
return self.prepare_value(cls, name, default)
@property
def serialized(self):
''' Whether the property should be serialized when serializing an object.
This would be False for a "virtual" or "convenience" property that duplicates
information already available in other properties, for example.
'''
return self._serialized
@property
def readonly(self):
''' Whether this property is read-only.
Read-only properties may only be modified by the client (i.e., by BokehJS
in the browser).
'''
return self._readonly
def matches(self, new, old):
''' Whether two parameters match values.
If either ``new`` or ``old`` is a NumPy array or Pandas Series or Index,
then the result of ``np.array_equal`` will determine if the values match.
Otherwise, the result of standard Python equality will be returned.
Returns:
True, if new and old match, False otherwise
'''
if isinstance(new, np.ndarray) or isinstance(old, np.ndarray):
return np.array_equal(new, old)
if pd:
if isinstance(new, pd.Series) or isinstance(old, pd.Series):
return np.array_equal(new, old)
if isinstance(new, pd.Index) or isinstance(old, pd.Index):
return np.array_equal(new, old)
try:
# this handles the special but common case where there is a dict with array
# or series as values (e.g. the .data property of a ColumnDataSource)
if isinstance(new, dict) and isinstance(old, dict):
if set(new.keys()) != set(old.keys()):
return False
return all(self.matches(new[k], old[k]) for k in new)
return new == old
# if the comparison fails for some reason, just punt and return no-match
except ValueError:
return False
def from_json(self, json, models=None):
''' Convert from JSON-compatible values into a value for this property.
JSON-compatible values are: list, dict, number, string, bool, None
'''
return json
def serialize_value(self, value):
''' Change the value into a JSON serializable format.
'''
return value
def transform(self, value):
''' Change the value into the canonical format for this property.
Args:
value (obj) : the value to apply transformation to.
Returns:
obj: transformed value
'''
return value
def validate(self, value, detail=True):
''' Determine whether we can set this property from this value.
Validation happens before transform()
Args:
value (obj) : the value to validate against this property type
            detail (bool, optional) : whether to construct detailed exceptions
Generating detailed type validation error messages can be
expensive. When doing type checks internally that will not
escape exceptions to users, these messages can be skipped
by setting this value to False (default: True)
Returns:
None
Raises:
ValueError if the value is not valid for this property type
'''
pass
def is_valid(self, value):
''' Whether the value passes validation
Args:
value (obj) : the value to validate against this property type
Returns:
True if valid, False otherwise
'''
try:
if validation_on():
self.validate(value, False)
except ValueError:
return False
else:
return True
@classmethod
def wrap(cls, value):
''' Some property types need to wrap their values in special containers, etc.
'''
return value
def prepare_value(self, obj_or_cls, name, value):
try:
if validation_on():
self.validate(value)
except ValueError as e:
for tp, converter in self.alternatives:
if tp.is_valid(value):
value = converter(value)
break
else:
raise e
else:
value = self.transform(value)
if isinstance(obj_or_cls, HasProps):
obj = obj_or_cls
for fn, msg_or_fn in self.assertions:
if isinstance(fn, bool):
result = fn
else:
result = fn(obj, value)
assert isinstance(result, bool)
if not result:
if isinstance(msg_or_fn, string_types):
raise ValueError(msg_or_fn)
else:
msg_or_fn(obj, name, value)
return self.wrap(value)
@property
def has_ref(self):
return False
def accepts(self, tp, converter):
''' Declare that other types may be converted to this property type.
Args:
tp (Property) :
A type that may be converted automatically to this property
type.
converter (callable) :
A function accepting ``value`` to perform conversion of the
value to this property type.
Returns:
self
'''
tp = ParameterizedProperty._validate_type_param(tp)
self.alternatives.append((tp, converter))
return self
def asserts(self, fn, msg_or_fn):
''' Assert that prepared values satisfy given conditions.
        Assertions are intended to enforce conditions beyond simple value
        type validation. For instance, this method can be used to assert that
the columns of a ``ColumnDataSource`` all collectively have the same
length at all times.
Args:
fn (callable) :
A function accepting ``(obj, value)`` that returns True if the value
                passes the assertion, or False otherwise
msg_or_fn (str or callable) :
A message to print in case the assertion fails, or a function
                accepting ``(obj, name, value)`` to call in case the assertion
fails.
Returns:
self
'''
self.assertions.append((fn, msg_or_fn))
return self
class ParameterizedProperty(Property):
''' A base class for Properties that have type parameters, e.g.
``List(String)``.
'''
@staticmethod
def _validate_type_param(type_param):
if isinstance(type_param, type):
if issubclass(type_param, Property):
return type_param()
else:
type_param = type_param.__name__
elif isinstance(type_param, Property):
return type_param
raise ValueError("expected a Propertyas type parameter, got %s" % type_param)
@property
def type_params(self):
raise NotImplementedError("abstract method")
@property
def has_ref(self):
return any(type_param.has_ref for type_param in self.type_params)
class PrimitiveProperty(Property):
''' A base class for simple property types.
Subclasses should define a class attribute ``_underlying_type`` that is
a tuple of acceptable type values for the property.
Example:
A trivial version of a ``Float`` property might look like:
.. code-block:: python
class Float(PrimitiveProperty):
_underlying_type = (numbers.Real,)
'''
_underlying_type = None
def validate(self, value, detail=True):
super(PrimitiveProperty, self).validate(value, detail)
if not (value is None or isinstance(value, self._underlying_type)):
msg = "" if not detail else "expected a value of type %s, got %s of type %s" % (
nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__
)
raise ValueError(msg)
def from_json(self, json, models=None):
if json is None or isinstance(json, self._underlying_type):
return json
else:
expected = nice_join([ cls.__name__ for cls in self._underlying_type ])
raise DeserializationError("%s expected %s, got %s" % (self, expected, json))
def _sphinx_type(self):
return self._sphinx_prop_link()
class ContainerProperty(ParameterizedProperty):
''' A base class for Container-like type properties.
'''
def _may_have_unstable_default(self):
# all containers are mutable, so the default can be modified
return True
def validation_on():
''' Check if property validation is currently active
Returns:
bool
'''
return Property._should_validate
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
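# A minimal, hedged sketch of subclassing ``PrimitiveProperty`` from this module.
# ``EvenInt`` is a made-up example type (not part of Bokeh) used only to show how
# ``validate``/``is_valid`` interact with ``_underlying_type``.
if __name__ == '__main__':
    class EvenInt(PrimitiveProperty):
        ''' Accept only even integers (illustrative example). '''
        _underlying_type = (int,)

        def validate(self, value, detail=True):
            super(EvenInt, self).validate(value, detail)
            if value is not None and value % 2 != 0:
                raise ValueError("expected an even integer, got %r" % (value,) if detail else "")

    p = EvenInt(default=0)
    print(p.is_valid(4))   # True
    print(p.is_valid(3))   # False, because validate() raised ValueError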
|
bsd-3-clause
|
dsquareindia/scikit-learn
|
examples/applications/plot_model_complexity_influence.py
|
323
|
6372
|
"""
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
|
bsd-3-clause
|
giorgiop/scikit-learn
|
examples/model_selection/plot_roc_crossval.py
|
21
|
3477
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.model_selection.cross_val_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
colors = cycle(['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange'])
lw = 2
i = 0
for (train, test), color in zip(cv.split(X, y), colors):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # Compute ROC curve and area under the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=lw, color=color,
label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=lw, color='k',
label='Luck')
mean_tpr /= cv.get_n_splits(X, y)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, color='g', linestyle='--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=lw)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
idlead/scikit-learn
|
sklearn/utils/metaestimators.py
|
283
|
2353
|
"""Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
|
bsd-3-clause
|
arthurfait/HMM-3
|
hmm/State.py
|
2
|
20970
|
'''
This file contains the definitions of:
node_tr => transitions class
node_em => emission class
node_em_gmm => gaussian mixture model emission class (derived class of node_em)
State => state class which contains
one pointer to a node_tr object
one pointer to a node_em object
Extending the code of Piero Fariselli to include the GMM for each state, aka nodes.
When GMMs are used, the emission class node_em must be overridden by
node_em_gmm, which uses Gaussian mixtures instead of discrete emissions.
'''
from Def import DEF
import numpy as NUM
import sys
from sklearn.covariance import graph_lasso
######################################
######## class node_trans ############
######################################
class node_tr:
'''
    this class implements the node transitions
'''
def __init__(self,name,tr):
'''
__init__(self,name,tr)
name = identifier, tr = transition probabilities
'''
        self.name=name # e.g. '_tr1','_tr2' ...
        self.len=len(tr)
        self._tr=tr # vector of transition probabilities
# computes the log of the transitions
self._ln_tr=[DEF.big_negative]*self.len
for i in range(self.len):
self._ln_tr[i] = self.__safe_log(self._tr[i])
#if(self._tr[i] > DEF.tolerance):
# self._ln_tr[i]=NUM.log(self._tr[i])
def __safe_log(self,x):
if x < DEF.small_positive:
return DEF.big_negative
else:
return NUM.log(x)
def tr(self,i):
'''
tr(self,i) -> transition probability between self->i-th
'''
#assert(i<self.len)
return self._tr[i]
def ln_tr(self,i):
'''
        ln_tr(self,i) -> log of the transition probability between self->i-th
'''
#assert(i<self.len)
return self._ln_tr[i]
def set_tr(self,i,value):
'''
set_tr(self,i,value) sets the i-th transition of self to value
'''
#assert(i<self.len)
self._tr[i]=value
if(self._tr[i] > DEF.tolerance):
self._ln_tr[i]=NUM.log(self._tr[i])
else:
self._ln_tr[i]=DEF.big_negative
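# A small, self-contained numpy sketch of the "safe log" idea used by ``node_tr``
# above: probabilities below a small threshold map to a large negative constant
# instead of letting log(0) produce -inf. The thresholds here are illustrative
# stand-ins for ``DEF.small_positive`` and ``DEF.big_negative``.
def _safe_log_demo(probs, small_positive=1e-300, big_negative=-1e300):
    return [NUM.log(p) if p >= small_positive else big_negative for p in probs]
# e.g. _safe_log_demo([0.7, 0.3, 0.0]) -> [log(0.7), log(0.3), -1e300]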
######################################
######################################
######## class node_em ############
######################################
class node_em:
'''
    this class implements the node emissions and is a base class for discrete emissions and GMMs
'''
def __init__(self,name,em):
'''
__init__(self,name,em=None)
name = identifier, em = emission probabilities
'''
        self.name=name # e.g. '_tr1','_tr2' ...
        self.len=len(em) # length of the emission vector
        self._em=em # emission probability vector
# computes the log of the emissions
self._ln_em=[DEF.big_negative]*self.len
for i in range(self.len):
if(self._em[i] > DEF.tolerance):
self._ln_em[i]=NUM.log(self._em[i])
def __safe_log(self,x):
if x < DEF.small_positive:
return DEF.big_negative
else:
return NUM.log(x)
def em(self,i):
'''
em(self,i) -> emission probability of discrete symbol i in the current node
'''
#assert(i<self.len)
return self._em[i]
def ln_em(self,i):
'''
        ln_em(self,i) -> log of the emission probability of discrete symbol i in the current node
'''
#assert(i<self.len)
return self._ln_em[i]
def normalise_mix_weights(self):
'''
normalise the mixture weights for this state so that they sum to one.
This is for the GMM version, which is accessed via dynamic binding and
polymorphism.
'''
return True
def get_emissions(self):
'''
Returns the list of discrete emission probabilities
'''
return self._em
def get_type_name(self):
'''
returns the type name for this node emission object. This is used for setting the precomputed emissions matrix.
'''
return "node_em"
######################################
######################################
######## class node_gmm ############
######################################
class node_em_gmm(node_em):
'''
this class implements the node emissions using gaussian mixture models
'''
def __init__(self,name,mix_weights,mix_densities):
'''
__init__(self,name,em=None)
name = identifier,
mix_weights = list of weights for the mixtures,
mix_densities = list of mixture_density objects
'''
        self.name=name # e.g. '_tr1','_tr2' ...
self._mix_weights = mix_weights
self._mix_num = len(self._mix_weights)
self._mix_densities = mix_densities # In the literature this is referred to as a codebook
if self._mix_num > 0:
self._len = len(self._mix_densities[0].get_mean()) # shape/dim of the GMM parameters
else:
self._len = 0 #no mixtures in a null/silent state
def __safe_log(self,x):
if x < DEF.small_positive:
return DEF.big_negative
else:
return NUM.log(x)
def get_mix_num(self):
'''
mix_num(self) -> number of gaussian mixtures for this node
'''
#assert(i<self.len)
return self._mix_num
def get_mix_weight(self,i):
'''
mix_weight(self,i) -> returns the i-th mixture's weight for this node
'''
#assert((i>=0) and (i<self._mix_num)) # assert the index of the mixture weight
return self._mix_weights[i]
def get_mixture_density(self,i):
'''
Returns a mixture_density object with corresponding parameters
'''
return self._mix_densities[i]
def get_em_mix_name(self):
'''
Returns the name of the mixture densities for this state
'''
return self._mix_densities[0].get_name()
def get_mixtures(self):
'''
Returns a reference to this state's list of mixture densities
'''
return self._mix_densities
def get_emissions(self):
'''
returns the node_em_gmm object instance
'''
return self
def set_mix_num(self,value):
'''
set_mix_num(self,value) set the number of mixtures for this node
'''
#assert(value >= 0)
self._mix_num=int(value)
def set_mix_weight(self,i,weight):
'''
set the i-th mixture's weight value for this node
'''
if ((i>=0) and (i < self._mix_num)):
self._mix_weights[i] = float(weight)
else:
            print(self.name + ": cannot set mixture " + str(i) + " weight.\n")
def set_mix_density(self,i,mix_density):
'''
Set the i-th mixture's density function with mean, covariance, and precision matrices
'''
self._mix_densities[i] = mix_density
def normalise_mix_weights(self):
'''
normalise the mixture weights for this state so that they sum to one.
'''
        err_tol = 6 # six decimal places of precision
        sum_val = 1.0 # the value to which the mixture weights must sum
        weight_sum = float(sum(self._mix_weights))
        already_normalised = (NUM.absolute(NUM.round(weight_sum, err_tol) - sum_val) < DEF.tolerance)
        if not already_normalised and weight_sum > 0.0:
            self._mix_weights = [w / weight_sum for w in self._mix_weights]
        # the return value lets the caller know whether the weights were already normalised
        return already_normalised
def em(self,vec):
'''
em(self,vec) -> compute the probability of emission of vec in the node/state
with the mixtures. The default option is the multivariate gaussians, but any
pdf with the required properties will do.
'''
lprob = self.ln_em(vec)
prob = NUM.exp(lprob)
return prob
def ln_em(self,vec):
'''
ln_em(self,vec) -> compute the ln of probability of emission of vec in the node/state
with the gaussian mixtures.
'''
        assert(len(vec) == self._len)
        # the mixture density is a weighted sum of component densities, so its log
        # must be combined with a log-sum-exp rather than a plain sum of log terms
        if self._mix_num == 0:
            return 0.0
        lprobs = [self.__safe_log(self._mix_weights[i]) + self._mix_densities[i].ln_em(vec)
                  for i in range(self._mix_num)]
        lmax = max(lprobs)
        lprob = lmax + NUM.log(sum(NUM.exp(lp - lmax) for lp in lprobs))
        return lprob
def get_type_name(self):
'''
returns the type name for this node emission object. This is used for setting the precomputed emissions matrix.
'''
return "node_em_gmm"
######################################
class mixture_density:
'''
    A class representing the mixture densities used for the hidden state emissions.
'''
def __init__(self,name,mean,covariance,precision=None):
self._name = name
self._mean = mean
self._cov = covariance
self._precision = precision
        #compute the mix_precisions using sparse inverse covariance estimation by the graph lasso method
# must use sparse inverse covariance estimation by graphical lasso method as the determinant of a
# non-diagonal covariance matrix will be "too small" and cause numerical issues
# These are set during the Baum-Welch learning phase in the set_param() function
'''
for i in range(self._mix_num):
#compute and set the covariance precision matrices corresponding to the newly updated empirical covariance matrices
# here we use the empirical covariance as input to the graphlasso algorithm, with a predefined
# alpha: positive float
# The regularization parameter: the higher alpha, the more regularization, the sparser the inverse covariance
emp_cov = self._mix_covars[i]
glasso_cov, glasso_precision = graph_lasso(emp_cov, DEF.alpha, mode='cd', tol=1e-4, max_iter=100,verbose=False)
self._mix_covars[i] = glasso_cov
self._mix_precisions[i] = glasso_precision
'''
def __safe_log(self,x):
if x < DEF.small_positive:
return DEF.big_negative
else:
return NUM.log(x)
def get_name(self):
return self._name
def get_mean(self):
return self._mean
def get_cov(self):
return self._cov
def get_precision(self):
return self._precision
def set_name(self,name):
self._name = name
def set_mean(self,mean):
self._mean = mean
def set_cov(self,covariance):
self._cov = covariance
def set_precision(self,precision):
self._precision = precision
def em(self,vec):
prob = self.multivariate_gaussian(vec)
return prob
def ln_em(self,vec):
ln_prob = self.ln_multivariate_gaussian(vec)
return ln_prob
def multivariate_gaussian(self,vec):
'''
        multivariate_gaussian(self,vec) -> compute the probability of vec under the
        multivariate gaussian defined by this mixture's mean, covariance and precision.
        This only works when the covariance matrix is positive definite.
        If the covariance matrix is only positive semi-definite (singular), then one of the
        random variables is a linear combination of the others and the density is degenerate.
'''
result = 0.0
dim = len(self._mean)
det_covar = NUM.linalg.det(self._cov)
if ((len(vec) == dim) and ((dim,dim) == self._cov.shape)):
if (det_covar == 0):
sys.stderr.write("singular covariance matrix")
sys.exit(-1)
else:
                if self._precision is None:
self._precision = NUM.matrix(NUM.linalg.pinv(self._cov))
vec_mean = NUM.matrix(NUM.matrix(vec) - NUM.matrix(self._mean))
#frac = NUM.power(2.0*NUM.pi,0.5*dim)*NUM.power(det_covar,0.5)
part1 = -0.5*dim*self.__safe_log(2.0*NUM.pi)
part2 = -0.5*self.__safe_log(det_covar)
part3 = -0.5*float(((vec_mean*self._precision)*vec_mean.T))
log_result = part1 + part2 + part3
result = NUM.exp(log_result)
else:
sys.stderr.write("The dimensions of the input don't match.")
sys.exit(-1)
#if result == 0.0:
# print "prob %f"%result
# print self._cov, vec
# print "Determinant of Covar %f"%det_covar
# print "Part1 %f, Part2 %f, Part3 %f"%(part1,part2,part3)
return result
def ln_multivariate_gaussian(self,vec):
'''
        ln_multivariate_gaussian(self,vec) -> compute the log-probability of vec under the
        multivariate gaussian defined by this mixture's mean, covariance and precision.
        This only works when the covariance matrix is positive definite.
        If the covariance matrix is only positive semi-definite (singular), then one of the
        random variables is a linear combination of the others and the density is degenerate.
'''
result = 0.0
dim = len(self._mean)
det_covar = NUM.linalg.det(self._cov)
if ((len(vec) == dim) and ((dim,dim) == self._cov.shape)):
if (det_covar == 0):
sys.stderr.write("singular covariance matrix")
sys.exit(-1)
else:
                if self._precision is None:
self._precision = NUM.matrix(NUM.linalg.pinv(self._cov))
vec_mean = NUM.matrix(NUM.matrix(vec) - NUM.matrix(self._mean))
#frac = NUM.power(2.0*NUM.pi,0.5*dim)*NUM.power(det_covar,0.5)
part1 = -0.5*dim*self.__safe_log(2.0*NUM.pi)
part2 = -0.5*self.__safe_log(det_covar)
part3 = -0.5*float(((vec_mean*self._precision)*vec_mean.T))
log_result = part1 + part2 + part3
#result = NUM.exp(log_result)
result = log_result
else:
sys.stderr.write("The dimensions of the input don't match.")
sys.exit(-1)
#if result == 0.0:
# print "prob %f"%result
# print self._cov, vec
# print "Determinant of Covar %f"%det_covar
# print "Part1 %f, Part2 %f, Part3 %f"%(part1,part2,part3)
return result
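# A small, self-contained numpy check of the log-density formula used by
# ``mixture_density`` above:
#   ln N(x; mu, Sigma) = -d/2*ln(2*pi) - 1/2*ln|Sigma| - 1/2*(x-mu)^T Sigma^{-1} (x-mu)
def _ln_mvn_demo(x, mean, cov):
    x = NUM.asarray(x, dtype=float)
    mean = NUM.asarray(mean, dtype=float)
    cov = NUM.asarray(cov, dtype=float)
    d = len(mean)
    diff = x - mean
    precision = NUM.linalg.pinv(cov)
    return (-0.5 * d * NUM.log(2.0 * NUM.pi)
            - 0.5 * NUM.log(NUM.linalg.det(cov))
            - 0.5 * float(diff.dot(precision).dot(diff)))
# e.g. for a standard 2-d gaussian evaluated at the origin this gives -ln(2*pi) ~= -1.8379:
# _ln_mvn_demo([0.0, 0.0], [0.0, 0.0], NUM.eye(2))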
######################################
######## class State ############
######################################
class State:
'''
This class implements the state of a HMM
'''
def __init__(self,name,n_tr,n_em,out_s,in_s,em_let,tied_t,tied_e,tied_e_mix,end_s,label=None):
'''
__init__(self,name,n_tr,n_em,out_s,in_s,em_let,tied_t,tied_e,end_s,label=None)
name = state name
n_tr = a node_tr object
n_em = a node_em object (either discrete symbol or GMM)
out_s = the state outlinks [list of the state names]
in_s = the state inlinks [list of the state names]
em_let = emission letter [list in the order given by n_em]
tied_t = if is tied to a given transition (state name or None)
tied_e = if is tied to a given emission at state level (state name or None)
tied_e_mix = if is tied to a given state's list of mixture densities at sub-state mixture-tying level (state name or None)
end_s = end state flag
label = classification attribute (None default)
_idxem={} dictionary name:index
_idxtr={} dictionary name:index
'''
self.name=name
self._node_tr=n_tr
self._node_em=n_em
self.out_links=out_s
self.in_links=in_s
self.em_letters=em_let
self.tied_t=tied_t
self.tied_e=tied_e
self.tied_e_mix = tied_e_mix
self.end_state=end_s
self.label=label
self._idxem={}
self._idxtr={}
for name in self.out_links:
self._idxtr[name]=self.out_links.index(name)
for symbol in self.em_letters:
self._idxem[symbol]=self.em_letters.index(symbol)
# some tests
#assert(self._node_tr.len == len(self.out_links))
#assert(self._node_em.len == len(self.em_letters))
#check if state is null/silent
if (self._node_em.get_type_name() == "node_em_gmm"):
if (self._node_em.get_mix_num() == 0):
self.silent = True
else:
self.silent = False
else:
if (self.em_letters == []):
self.silent = True
else:
self.silent = False
def __safe_log(self,x):
if x < DEF.small_positive:
return DEF.big_negative
else:
return NUM.log(x)
def get_tr_name(self):
'''
get_tr_name() -> returns the name of the transitions
'''
return self._node_tr.name
def get_em_name(self):
'''
get_em_name() -> returns the name of the emissions
'''
return self._node_em.name
def get_em_mix_name(self):
'''
Returns the name of emission mixture densities. Since states can only share all or none of their mixtures
this is the same for all mixture densities in a state
'''
return self._node_em.get_em_mix_name()
def get_transitions(self):
'''
get_transitions() -> returns the value of the transitions
'''
return self._node_tr._tr
def get_emissions(self):
'''
get_emissions() -> if the node_em object is the discrete symbol version
then return a list of emission probs else if the gmm version
then returns the node_em_gmm object with the number of
mixtures, a list of means, and a list of covariance matrices
'''
return self._node_em.get_emissions()
def a(self,state):
'''
a_{i,j} in durbin et al., 1998
self.a(state) -> transition probability between self->state
'''
        if state.name in self._idxtr:
return self._node_tr._tr[self._idxtr[state.name]]
else:
return(0.0)
def set_a(self,state,value):
'''
set the value of a_{i,j} in durbin et al., 1998
self.a(state,value) -> self->state = value
'''
self._node_tr.set_tr(self._idxtr[state.name],value)
def e(self,symbol):
'''
e_{k}(x) in durbin et al., 1998
self.e(symbol) -> emission probability in state self of 'symbol'
'''
if (len(symbol) == 1):
            if symbol in self._idxem:
return self._node_em.em(self._idxem[symbol])
else:
return(0.0)
else:
return self._node_em.em(symbol) #symbol is actually a vector profile in this case
def set_e(self,symbol,value):
'''
set the value of e_{k}(x) in durbin et al., 1998
self.e(symbol,value) -> set self.e(symbol)=value
'''
self._node_em.set_em(self._idxem[symbol],value)
def ln_a(self,state):
'''
ln(a_{i,j}) in durbin et al., 1998
self.ln_a(state) -> log(transition probability between self->state)
'''
        if state.name in self._idxtr:
return(self._node_tr.ln_tr(self._idxtr[state.name]))
else:
return(DEF.big_negative)
def ln_e(self,symbol):
'''
ln(e_{k}(x)) in durbin et al., 1998
self.ln_e(symbol) -> log(emission probability in state self of 'symbol')
'''
#ce=self.e(symbol)
#if ce > 0:
# return(NUM.log(ce))
#else:
# return(DEF.big_negative)
        if (len(symbol) == 1):
            if symbol in self._idxem:
                return self._node_em.ln_em(self._idxem[symbol])
            else:
                return(DEF.big_negative) # log of a zero emission probability
else:
return self._node_em.ln_em(symbol) #symbol is actually a vector profile in this case
def is_null(self):
'''
Returns True or False boolean value depending on whether or not the state is
null/silent or emitting.
'''
return self.silent
def get_type_name(self):
'''
Returns the type emission node object this State node contains.
Either node_em or node_em_gmm
'''
return self.get_emissions().get_type_name()
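# A hedged, minimal sketch of wiring a discrete-emission ``State`` from the classes
# defined above; names, alphabet and probabilities are illustrative only, and the
# ``DEF`` constants are assumed to come from the ``Def`` import at the top of this module.
def _build_demo_state():
    tr = node_tr('_tr_A', [0.9, 0.1])                # transitions A->A, A->B
    em = node_em('_em_A', [0.25, 0.25, 0.25, 0.25])  # uniform over a 4-letter alphabet
    return State('A', tr, em,
                 out_s=['A', 'B'], in_s=['A'],
                 em_let=['a', 'c', 'g', 't'],
                 tied_t=None, tied_e=None, tied_e_mix=None,
                 end_s=False, label='demo')
# e.g. s = _build_demo_state(); s.e('a') -> 0.25 and s.ln_a(s) -> log(0.9)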
########################
|
gpl-3.0
|
vikingMei/mxnet
|
example/bayesian-methods/bdk_demo.py
|
45
|
15837
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import logging
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import argparse
import time  # used by run_synthetic_SGLD below (time.time)
from algos import *
from data_loader import *
from utils import *
class CrossEntropySoftmax(mx.operator.NumpyOp):
def __init__(self):
super(CrossEntropySoftmax, self).__init__(False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = in_shape[0]
output_shape = in_shape[0]
return [data_shape, label_shape], [output_shape]
def forward(self, in_data, out_data):
x = in_data[0]
y = out_data[0]
y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1))).astype('float32')
y /= y.sum(axis=1).reshape((x.shape[0], 1))
def backward(self, out_grad, in_data, out_data, in_grad):
l = in_data[1]
y = out_data[0]
dx = in_grad[0]
dx[:] = (y - l)
class LogSoftmax(mx.operator.NumpyOp):
def __init__(self):
super(LogSoftmax, self).__init__(False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = in_shape[0]
output_shape = in_shape[0]
return [data_shape, label_shape], [output_shape]
def forward(self, in_data, out_data):
x = in_data[0]
y = out_data[0]
y[:] = (x - x.max(axis=1, keepdims=True)).astype('float32')
y -= numpy.log(numpy.exp(y).sum(axis=1, keepdims=True)).astype('float32')
# y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
# y /= y.sum(axis=1).reshape((x.shape[0], 1))
def backward(self, out_grad, in_data, out_data, in_grad):
l = in_data[1]
y = out_data[0]
dx = in_grad[0]
dx[:] = (numpy.exp(y) - l).astype('float32')
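# A small, self-contained numpy check of the stabilization used in ``LogSoftmax.forward``
# above: shifting by the row max before exponentiating keeps the computation finite even
# for large logits, and the recovered probabilities still sum to one.
def _reference_log_softmax(x):
    y = x - x.max(axis=1, keepdims=True)
    return y - numpy.log(numpy.exp(y).sum(axis=1, keepdims=True))

assert numpy.allclose(
    numpy.exp(_reference_log_softmax(numpy.array([[1000.0, 1001.0, 1002.0]]))).sum(axis=1),
    1.0)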
def classification_student_grad(student_outputs, teacher_pred):
return [student_outputs[0] - teacher_pred]
def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
student_mean = student_outputs[0]
student_var = student_outputs[1]
grad_mean = nd.exp(-student_var) * (student_mean - teacher_pred)
grad_var = (1 - nd.exp(-student_var) * (nd.square(student_mean - teacher_pred)
+ 1.0 / teacher_noise_precision)) / 2
return [grad_mean, grad_var]
def get_mnist_sym(output_op=None, num_hidden=400):
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='mnist_fc1', num_hidden=num_hidden)
net = mx.symbol.Activation(data=net, name='mnist_relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='mnist_fc2', num_hidden=num_hidden)
net = mx.symbol.Activation(data=net, name='mnist_relu2', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='mnist_fc3', num_hidden=10)
if output_op is None:
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
else:
net = output_op(data=net, name='softmax')
return net
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
if grad is None:
grad = nd.empty(theta.shape, theta.context)
theta1 = theta.asnumpy()[0]
theta2 = theta.asnumpy()[1]
v1 = sigma1 ** 2
v2 = sigma2 ** 2
vx = sigmax ** 2
denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
-(X - theta1 - theta2) ** 2 / (2 * vx))
grad_npy = numpy.zeros(theta.shape)
grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
+ numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
X - theta1 - theta2) / vx) / denominator).sum() \
+ theta1 / v1
grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
X - theta1 - theta2) / vx) / denominator).sum() \
+ theta2 / v2
grad[:] = grad_npy
return grad
def get_toy_sym(teacher=True, teacher_noise_precision=None):
if teacher:
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='teacher_fc1', num_hidden=100)
net = mx.symbol.Activation(data=net, name='teacher_relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='teacher_fc2', num_hidden=1)
net = mx.symbol.LinearRegressionOutput(data=net, name='teacher_output',
grad_scale=teacher_noise_precision)
else:
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='student_fc1', num_hidden=100)
net = mx.symbol.Activation(data=net, name='student_relu1', act_type="relu")
student_mean = mx.symbol.FullyConnected(data=net, name='student_mean', num_hidden=1)
student_var = mx.symbol.FullyConnected(data=net, name='student_var', num_hidden=1)
net = mx.symbol.Group([student_mean, student_var])
return net
def dev():
return mx.gpu()
def run_mnist_SGD(training_num=50000):
X, Y, X_test, Y_test = load_mnist(training_num)
minibatch_size = 100
net = get_mnist_sym()
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
exe, exe_params, _ = SGD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
X_test=X_test, Y_test=Y_test,
total_iter_num=1000000,
initializer=initializer,
lr=5E-6, prior_precision=1.0, minibatch_size=100)
def run_mnist_SGLD(training_num=50000):
X, Y, X_test, Y_test = load_mnist(training_num)
minibatch_size = 100
net = get_mnist_sym()
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
exe, sample_pool = SGLD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
X_test=X_test, Y_test=Y_test,
total_iter_num=1000000,
initializer=initializer,
learning_rate=4E-6, prior_precision=1.0, minibatch_size=100,
thin_interval=100, burn_in_iter_num=1000)
def run_mnist_DistilledSGLD(training_num=50000):
X, Y, X_test, Y_test = load_mnist(training_num)
minibatch_size = 100
if training_num >= 10000:
num_hidden = 800
total_iter_num = 1000000
teacher_learning_rate = 1E-6
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.1
else:
num_hidden = 400
total_iter_num = 20000
teacher_learning_rate = 4E-5
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.001
teacher_net = get_mnist_sym(num_hidden=num_hidden)
logsoftmax = LogSoftmax()
student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())}
teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
student_initializer = BiasXavier(factor_type="in", magnitude=1)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num,
student_initializer=student_initializer,
teacher_initializer=teacher_initializer,
student_optimizing_algorithm="adam",
teacher_learning_rate=teacher_learning_rate,
student_learning_rate=student_learning_rate,
teacher_prior_precision=teacher_prior, student_prior_precision=student_prior,
perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev())
def run_toy_SGLD():
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0 / 9.0
net = get_toy_sym(True, teacher_noise_precision)
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
initializer = mx.init.Uniform(0.07)
exe, params, _ = \
SGLD(sym=net, data_inputs=data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=50000,
initializer=initializer,
learning_rate=1E-4,
# lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
prior_precision=0.1,
burn_in_iter_num=1000,
thin_interval=10,
task='regression',
minibatch_size=minibatch_size, dev=dev())
def run_toy_DistilledSGLD():
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0
teacher_net = get_toy_sym(True, teacher_noise_precision)
student_net = get_toy_sym(False)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev())}
# 'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())}
teacher_initializer = mx.init.Uniform(0.07)
student_initializer = mx.init.Uniform(0.07)
student_grad_f = lambda student_outputs, teacher_pred: \
regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
teacher_initializer=teacher_initializer,
student_initializer=student_initializer,
teacher_learning_rate=1E-4, student_learning_rate=0.01,
# teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
student_grad_f=student_grad_f,
teacher_prior_precision=0.1, student_prior_precision=0.001,
perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression',
dev=dev())
def run_toy_HMC():
X, Y, X_test, Y_test = load_toy()
minibatch_size = Y.shape[0]
noise_precision = 1 / 9.0
net = get_toy_sym(True, noise_precision)
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
initializer = mx.init.Uniform(0.07)
sample_pool = HMC(net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test,
sample_num=300000, initializer=initializer, prior_precision=1.0,
learning_rate=1E-3, L=10, dev=dev())
def run_synthetic_SGLD():
theta1 = 0
theta2 = 1
sigma1 = numpy.sqrt(10)
sigma2 = 1
sigmax = numpy.sqrt(2)
X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
minibatch_size = 1
total_iter_num = 1000000
lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
factor=0.55)
optimizer = mx.optimizer.create('sgld',
learning_rate=None,
rescale_grad=1.0,
lr_scheduler=lr_scheduler,
wd=0)
updater = mx.optimizer.get_updater(optimizer)
theta = mx.random.normal(0, 1, (2,), mx.cpu())
grad = nd.empty((2,), mx.cpu())
samples = numpy.zeros((2, total_iter_num))
start = time.time()
for i in xrange(total_iter_num):
if (i + 1) % 100000 == 0:
end = time.time()
print("Iter:%d, Time spent: %f" % (i + 1, end - start))
start = time.time()
ind = numpy.random.randint(0, X.shape[0])
synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax, rescale_grad=
X.shape[0] / float(minibatch_size), grad=grad)
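        # The stochastic gradient is rescaled by N / minibatch_size so that the
        # minibatch gradient approximates the full-data gradient term in the SGLD update.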
updater('theta', grad, theta)
samples[:, i] = theta.asnumpy()
plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
plt.colorbar()
plt.show()
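# Illustrative sketch only (not part of the original example; the helper name is ours):
# the 'sgld' optimizer used above is assumed to follow the standard SGLD update,
# i.e. half a gradient step on the rescaled log-posterior gradient plus Gaussian
# noise whose variance equals the current learning rate.
def sgld_update_sketch(theta, grad, learning_rate):
    """One SGLD step on plain numpy arrays: theta - lr / 2 * grad + N(0, lr) noise."""
    noise = numpy.random.normal(0.0, numpy.sqrt(learning_rate), theta.shape)
    return theta - 0.5 * learning_rate * grad + noise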
if __name__ == '__main__':
numpy.random.seed(100)
mx.random.seed(100)
parser = argparse.ArgumentParser(
description="Examples in the paper [NIPS2015]Bayesian Dark Knowledge and "
"[ICML2011]Bayesian Learning via Stochastic Gradient Langevin Dynamics")
parser.add_argument("-d", "--dataset", type=int, default=1,
help="Dataset to use. 0 --> TOY, 1 --> MNIST, 2 --> Synthetic Data in "
"the SGLD paper")
parser.add_argument("-l", "--algorithm", type=int, default=2,
help="Type of algorithm to use. 0 --> SGD, 1 --> SGLD, other-->DistilledSGLD")
parser.add_argument("-t", "--training", type=int, default=50000,
help="Number of training samples")
args = parser.parse_args()
training_num = args.training
if args.dataset == 1:
if 0 == args.algorithm:
run_mnist_SGD(training_num)
elif 1 == args.algorithm:
run_mnist_SGLD(training_num)
else:
run_mnist_DistilledSGLD(training_num)
elif args.dataset == 0:
if 1 == args.algorithm:
run_toy_SGLD()
elif 2 == args.algorithm:
run_toy_DistilledSGLD()
elif 3 == args.algorithm:
run_toy_HMC()
else:
run_synthetic_SGLD()
|
apache-2.0
|
skudriashev/incubator-airflow
|
tests/contrib/hooks/test_bigquery_hook.py
|
10
|
9247
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import mock
from airflow.contrib.hooks import bigquery_hook as hook
from oauth2client.contrib.gce import HttpAccessTokenRefreshError
bq_available = True
try:
hook.BigQueryHook().get_service()
except HttpAccessTokenRefreshError:
bq_available = False
class TestBigQueryDataframeResults(unittest.TestCase):
def setUp(self):
self.instance = hook.BigQueryHook()
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_output_is_dataframe_with_valid_query(self):
import pandas as pd
df = self.instance.get_pandas_df('select 1')
self.assertIsInstance(df, pd.DataFrame)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_invalid_query(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('from `1`')
self.assertIn('pandas_gbq.gbq.GenericGBQException: Reason: invalidQuery',
str(context.exception), "")
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
    def test_succeeds_with_explicit_legacy_query(self):
df = self.instance.get_pandas_df('select 1', dialect='legacy')
        self.assertEqual(df.iloc[0, 0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
    def test_succeeds_with_explicit_std_query(self):
df = self.instance.get_pandas_df('select * except(b) from (select 1 a, 2 b)', dialect='standard')
        self.assertEqual(df.iloc[0, 0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_incompatible_syntax(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('select * except(b) from (select 1 a, 2 b)', dialect='legacy')
self.assertIn('pandas_gbq.gbq.GenericGBQException: Reason: invalidQuery',
str(context.exception), "")
class TestBigQueryTableSplitter(unittest.TestCase):
def test_internal_need_default_project(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('dataset.table', None)
self.assertIn('INTERNAL: No default project is specified',
str(context.exception), "")
def test_split_dataset_table(self):
project, dataset, table = hook._split_tablename('dataset.table',
'project')
self.assertEqual("project", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative:dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_sql_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative.dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_colon_in_project(self):
project, dataset, table = hook._split_tablename('alt1:alt.dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_valid_double_column(self):
project, dataset, table = hook._split_tablename('alt1:alt:dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_invalid_syntax_triple_colon(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt3:dataset.table',
'project')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_triple_dot(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_column_double_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt.dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_colon_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt:dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_dot_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project', 'var_x')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
class TestBigQueryHookSourceFormat(unittest.TestCase):
def test_invalid_source_format(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load("test.test", "test_schema.json", ["test_data.json"], source_format="json")
# since we passed 'json' in, and it's not valid, make sure it's present in the error string.
self.assertIn("JSON", str(context.exception))
# Helpers for test_cancel_queries: mock_poll_job_complete returns False unless mock_job_cancel has been called with the same job_id.
mock_canceled_jobs = []
def mock_poll_job_complete(job_id):
return job_id in mock_canceled_jobs
def mock_job_cancel(projectId, jobId):
mock_canceled_jobs.append(jobId)
return mock.Mock()
class TestBigQueryBaseCursor(unittest.TestCase):
def test_invalid_schema_update_options(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"]
)
self.assertIn("THIS IS NOT VALID", str(context.exception))
def test_invalid_schema_update_and_write_disposition(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY'
)
self.assertIn("schema_update_options is only", str(context.exception))
@mock.patch("airflow.contrib.hooks.bigquery_hook.LoggingMixin")
@mock.patch("airflow.contrib.hooks.bigquery_hook.time")
    def test_cancel_queries(self, mocked_time, mocked_logging):  # mocks are injected in bottom-up decorator order
project_id = 12345
running_job_id = 3
mock_jobs = mock.Mock()
mock_jobs.cancel = mock.Mock(side_effect=mock_job_cancel)
mock_service = mock.Mock()
mock_service.jobs = mock.Mock(return_value=mock_jobs)
bq_hook = hook.BigQueryBaseCursor(mock_service, project_id)
bq_hook.running_job_id = running_job_id
bq_hook.poll_job_complete = mock.Mock(side_effect=mock_poll_job_complete)
bq_hook.cancel_query()
mock_jobs.cancel.assert_called_with(projectId=project_id, jobId=running_job_id)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
aabadie/scikit-learn
|
sklearn/utils/tests/test_estimator_checks.py
|
69
|
3894
|
import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
    # check that predict does input validation (NaN and inf must be rejected)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
    except Exception:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
|
bsd-3-clause
|
secimTools/SECIMTools
|
src/scripts/svm_classifier.py
|
1
|
22766
|
#!/usr/bin/env python
################################################################################
# DATE: 2017/06/29
#
# SCRIPT: svm_classifier.py
#
# VERSION: 2.0
#
# AUTHORS: Coded by: Ali Ashrafi ([email protected]),
#          Miguel A Ibarra ([email protected]),
#          and Alexander Kirpich ([email protected])
#
# DESCRIPTION: This script trains an SVM classifier on a training dataset and applies it to a target dataset.
#
################################################################################
import os, sys
import logging
import argparse
from argparse import RawDescriptionHelpFormatter
import numpy as np
import pandas as pd
from pandas import DataFrame as DF
from pandas import read_csv, read_table
from sklearn import svm
from sklearn import datasets
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from secimtools.dataManager import logger as sl
from secimtools.dataManager.interface import wideToDesign
def getOptions(myOpts=None):
parser = argparse.ArgumentParser( formatter_class=RawDescriptionHelpFormatter )
    # Standard secimtools inputs
standard = parser.add_argument_group(description="Standard Input")
standard.add_argument('-trw',"--train_wide", dest="train_wide", action='store',
required=True, help="wide part of the train dataset.")
standard.add_argument('-trd',"--train_design", dest="train_design",
action='store', required=True, help="design part of "\
"the train dataset.")
standard.add_argument('-tew',"--test_wide", dest="test_wide", action='store',
required=True, help="wide part of the test dataset.")
standard.add_argument('-ted',"--test_design", dest="test_design",
action='store', required=True, help="design part of"\
" the test dataset.")
standard.add_argument('-g',"--group", dest="group",action='store',
required=True, default=False, help="Name of column in design file"\
" with Group/treatment information.")
standard.add_argument('-id',"--ID", dest="uniqID", action='store',
required=True, help="Name of the column with unique "\
"identifiers.")
# Tool-specific inputs
tool = parser.add_argument_group(description="Tool Input")
tool.add_argument('-k',"--kernel", dest="kernel", action='store',
required=True, help="choice of kernel function: rbf, "\
"linear, poly, sigmoid.")
tool.add_argument('-d',"--degree", dest="degree", action='store',
required=True, help="(integer) degree for the polynomial"\
" kernel, default 3.")
tool.add_argument('-c',"--C", dest="C", action='store', required=True,
help="positive regularization parameter. This parameter is ignored when -cv is single or double")
tool.add_argument('-cv', "--cross_validation", dest="cross_validation", action='store',
required=True, help="Choice of cross-validation procedure for the regularization parameter -c determinantion: none, "\
"single, double.")
tool.add_argument('-c_lower_bound',"--C_lower_bound", dest="C_lower_bound", action='store', required=False,
help="positive regularization parameter lower bound. Ignored if -cv is none and -c is specified.")
tool.add_argument('-c_upper_bound',"--C_upper_bound", dest="C_upper_bound", action='store', required=False,
help="positive regularization parameter upper bound. Ignored if -cv is none and -c is specified.")
tool.add_argument('-a',"--a", dest="a", action='store', required=True,
help=" positive coefficient in kernel function.")
tool.add_argument('-b',"--b", dest="b", action='store', required=True,
help=" independent term coefficient in kernel function.")
# Tool outputs
output = parser.add_argument_group(description="Output Paths")
output.add_argument("-oc","--outClassification",dest="outClassification", action='store', required=True,
help="Name of the output file to store classification performed on the traing data set. TSV format.")
output.add_argument('-oca',"--outClassificationAccuracy", dest="outClassificationAccuracy", action='store',
required=True, help="Output classification accuracy value on the training data set.")
output.add_argument("-op","--outPrediction",dest="outPrediction", action='store', required=True,
help="Name of the output file to store prediction performed on the target data set. TSV format.")
output.add_argument('-opa',"--outPredictionAccuracy", dest="outPredictionAccuracy", action='store',
required=True, help="Output prediction accuracy value on the target data set.")
args = parser.parse_args()
# Standardize paths
args.test_wide = os.path.abspath(args.test_wide)
args.train_wide = os.path.abspath(args.train_wide)
args.test_design = os.path.abspath(args.test_design)
args.train_design = os.path.abspath(args.train_design)
args.outClassification = os.path.abspath(args.outClassification)
args.outClassificationAccuracy = os.path.abspath(args.outClassificationAccuracy)
args.outPrediction = os.path.abspath(args.outPrediction)
args.outPredictionAccuracy = os.path.abspath(args.outPredictionAccuracy)
return(args)
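# Hedged usage sketch; the file, group, and ID names below are placeholders, not files shipped with this repository:
#   python svm_classifier.py -trw train_wide.tsv -trd train_design.tsv \
#       -tew test_wide.tsv -ted test_design.tsv -g Group -id rowID \
#       -k rbf -d 3 -c 1 -cv none -a 0.5 -b 0.0 \
#       -oc classification.tsv -oca classification_accuracy.txt \
#       -op prediction.tsv -opa prediction_accuracy.txt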
def correctness(x):
if x[args.group]==x['predicted_class']:
return 1
else:
return 0
def getAccuracy(data):
data['correct']=data.apply(correctness,axis=1)
accuracy=float(data['correct'].sum())/data.shape[0]
return accuracy
def main(args):
"""Perform svm classification analysis"""
target = wideToDesign(wide=args.test_wide, design = args.test_design,
uniqID=args.uniqID, group=args.group, logger=logger)
train = wideToDesign(wide=args.train_wide, design= args.train_design,
uniqID=args.uniqID, group=args.group, logger=logger)
# Treat all data as numeric.
train.wide = train.wide.applymap(float)
    target.wide = target.wide.applymap(float)
# Drop missing values
train.dropMissing()
# Find out remaining sample IDs for data filtering
sample_ids = train.wide.index.tolist()
train = train.transpose()
target.dropMissing()
target = target.transpose()
# Make sure test and train have the same features
for i in target.columns:
if i not in train.columns:
del target[i]
# Saving input parameters into variables that will be easier to manipulate in the future.
cv_status = args.cross_validation
kernel_final = args.kernel
gamma_final = float(args.a)
coef0_final = float(args.b)
degree_final = int(args.degree)
# Define the data to use for model training.
train_classes_to_feed = train[args.group].copy()
train_data_to_feed = train
del train_data_to_feed[args.group]
# Define the data to use as a model target.
target_classes_to_feed = target[args.group].copy()
target_data_to_feed = target
del target_data_to_feed[args.group]
# Remove non-numeric columns
train_data_to_feed = train_data_to_feed.loc[:,sample_ids]
target_data_to_feed = target_data_to_feed.loc[:,sample_ids]
# Cross validation status can be either "none", "single" or "double".
# Case 1: User provides cv_status = "none". No cross-validation will be performed.
# The value of C has to be specified by the user and pulled from the user's input.
if cv_status == "none":
        # Tell the user that we are using the value of C pre-specified by the user.
logger.info("Using the value of C specified by the user.")
# Put the user defined C value into C_final variable.
C_final = float(args.C)
# Case 2: User provides cv_status = "single". Only single cross-validation will be performed for the value of C.
if cv_status == "single":
# Tell the user that we are using the C penalty determined via a single cross-validation.
logger.info("Using the value of C determined via a single cross-validation.")
        # Check whether the sample size is smaller than 100 and exit if so.
if (len(train_classes_to_feed) < 100):
logger.info("The required number of samples for a single cross-validation procedure is at least 100. The dataset has {0}.".format(len(train_classes_to_feed)))
logger.info("Exiting the tool.")
exit()
        # Define the user-supplied boundaries of the C grid used for the cross-validation procedure.
C_lower = float(args.C_lower_bound)
C_upper = float(args.C_upper_bound)
# Debugging step.
# print "C_lower", C_lower
# print "C_upper", C_upper
        # Create the list of C values for the single cross-validation grid.
        # The user-specified range is split into 20 evenly spaced values, each of which is evaluated in the cross-validation procedure.
C_list_of_values = np.linspace(C_lower, C_upper, 20)
        # Create the dictionary that is fed to the single cross-validation procedure.
        # In this dictionary gamma is specified by the user. We are only cross-validating over the value of C.
parameter_list_of_values_dictionary_gamma_specified = { "kernel": [kernel_final],
"C": C_list_of_values,
"gamma": [gamma_final],
"coef0": [coef0_final],
"degree": [degree_final] }
        # In this dictionary gamma is determined automatically if the first dictionary fails.
parameter_list_of_values_dictionary_gamma_auto = { "kernel": [kernel_final],
"C": C_list_of_values,
"gamma": ["auto"],
"coef0": [coef0_final],
"degree": [degree_final] }
# Debugging step
# Printing the dictionary that has just been created.
# print "parameter_list_of_values_dictionary_gamma_specified = ", parameter_list_of_values_dictionary_gamma_specified
# print "parameter_list_of_values_dictionary_gamma_auto = ", parameter_list_of_values_dictionary_gamma_auto
        # Define the fit depending on the gamma value.
try:
logger.info("Running SVM model")
# Creating a gridsearch object with parameter "C_list_of_values_dictionary"
internal_cv = GridSearchCV( estimator = SVC(), param_grid = parameter_list_of_values_dictionary_gamma_specified )
        except Exception:
            logger.info("Model failed with gamma = {0}, trying automatic gamma "\
                        "instead.".format(float(args.a)))
# Creating a gridsearch object with parameter "C_list_of_values_dictionary"
internal_cv = GridSearchCV( estimator = SVC(), param_grid = parameter_list_of_values_dictionary_gamma_auto )
# Performing internal_cv.
internal_cv.fit(train_data_to_feed, train_classes_to_feed)
# Debugging step.
# print "internal_cv.fit(train_data_to_feed, train_classes_to_feed) = ", internal_cv.fit(train_data_to_feed, train_classes_to_feed)
# print "train_data_to_feed =", train_data_to_feed
# print "train_classes_to_feed =", train_classes_to_feed
# print "internal_cv.best_score_ = ", internal_cv.best_score_
# print "internal_cv.cv_results_ = ", internal_cv.cv_results_
# print "internal_cv.best_score_ = ", internal_cv.best_score_
# print "internal_cv.best_params_ = ", internal_cv.best_params_['C']
# print "internal_cv.cv_results_['params'][internal_cv.best_index_] = ", internal_cv.cv_results_['params'][internal_cv.best_index_]
# Assigning C_final from the best internal_cv i.e. internal_cv.best_params_['C']
C_final = internal_cv.best_params_['C']
# Case 3: User provides cv_status = "double". Double cross-validation will be performed.
if cv_status == "double":
# Telling the user that we are using the C penalty determined via a double cross-validation.
logger.info("Using the value of C determined via a double cross-validation.")
        # Check whether the sample size is smaller than 100 and exit if so.
if (len(train_classes_to_feed) < 100):
logger.info("The required number of samples for a double cross-validation procedure is at least 100. The dataset has {0}.".format(len(train_classes_to_feed)))
logger.info("Exiting the tool.")
exit()
        # Define the user-supplied boundaries of the C grid used for the cross-validation procedure.
C_lower = float(args.C_lower_bound)
C_upper = float(args.C_upper_bound)
# Debugging step.
# print "C_lower", C_lower
# print "C_upper", C_upper
        # Create the list of C values for the cross-validation grid.
        # The user-specified range is split into 20 evenly spaced values, each of which is evaluated in the cross-validation procedure.
C_list_of_values = np.linspace(C_lower, C_upper, 20)
# Creating C_final equal to the first element of indexed array C_list_of_values.
# This will be updated during internal and external CV steps if necessary.
C_final = C_list_of_values[0]
for index_current in range(0, 20):
# Creating the set of candidates that we will use for both cross-validation loops: internal and external
C_list_of_values_current = np.linspace(C_list_of_values[0], C_list_of_values[index_current], (index_current+1) )
            # Create the dictionary that is fed to the single cross-validation procedure.
            # In this dictionary gamma is specified by the user.
parameter_list_of_values_dictionary_gamma_specified = { "kernel": [kernel_final],
"C": C_list_of_values_current,
"gamma": [gamma_final],
"coef0": [coef0_final],
"degree": [degree_final] }
            # In this dictionary gamma is determined automatically if the first dictionary fails.
parameter_list_of_values_dictionary_gamma_auto = { "kernel": [kernel_final],
"C": C_list_of_values_current,
"gamma": ["auto"],
"coef0": [coef0_final],
"degree": [degree_final] }
# Debugging step
# Printing the dictionary that has just been created.
# print "parameter_list_of_values_dictionary_gamma_specified = ", parameter_list_of_values_dictionary_gamma_specified
# print "parameter_list_of_values_dictionary_gamma_auto = ", parameter_list_of_values_dictionary_gamma_auto
            # Define the fit depending on the gamma value.
try:
logger.info("Running SVM model")
# Creating a gridsearch object with parameter "C_list_of_values_dictionary"
internal_cv = GridSearchCV( estimator = SVC(), param_grid = parameter_list_of_values_dictionary_gamma_specified )
            except Exception:
                logger.info("Model failed with gamma = {0}, trying automatic gamma "\
                            "instead.".format(float(args.a)))
# Creating a gridsearch object with parameter "C_list_of_values_dictionary"
internal_cv = GridSearchCV( estimator = SVC(), param_grid = parameter_list_of_values_dictionary_gamma_auto )
# Debugging piece.
# Performing internal_cv.
internal_cv.fit(train_data_to_feed, train_classes_to_feed)
# print "train_classes_to_feed =", train_classes_to_feed
# print "internal_cv.best_score_ = ", internal_cv.best_score_
# print "internal_cv.grid_scores_ = ", internal_cv.grid_scores_
# print "internal_cv.best_params_['C'] = ", internal_cv.best_params_['C']
# Performing external_cv using internal_cv
external_cv = cross_val_score(internal_cv, train_data_to_feed, train_classes_to_feed)
# Debugging piece.
# print external_cv
# print external_cv.mean()
# Checking whether adding this current value to C_list_of_values_current helped improve the result.
# For the first run C_list_of_values[0] i.e. for index_current = 0 we assume that external_cv.mean() is the best already.
# It is the best since we have not tried anything else yet.
if index_current == 0:
                best_prediction_proportion = external_cv.mean()
            else:
                # Check whether adding this extra candidate value improved on what we already had.
                if external_cv.mean() > best_prediction_proportion:
                    best_prediction_proportion = external_cv.mean()
C_final = C_list_of_values[index_current]
    # The code below runs after C_final has been determined, either directly by the user
    # or via the single or double cross-validation above.
    # By now the chosen value should be stored in the C_final variable.
# Debugging piece.
C_final = float(C_final)
print("The value of C used for the SVM classifier is ", C_final)
    # Train the SVM.
try:
logger.info("Running SVM model")
svm_model = svm.SVC(kernel=args.kernel, C=C_final, gamma=float(args.a),
coef0=float(args.b), degree=int(args.degree))
    except Exception:
logger.info("Model failed with gamma = {0} trying automatic gamma "\
"instead.".format(float(args.a)))
svm_model = svm.SVC(kernel=args.kernel, C=C_final, gamma="auto",
coef0=float(args.b), degree=int(args.degree))
# Fitting the svm_model here.
svm_model.fit( train_data_to_feed, train_classes_to_feed )
    # Build the classification data frame of observed versus predicted values.
    # Get the predicted values of the SVM for the training data set.
train_fitted_values = svm_model.predict( train_data_to_feed )
    # Debugging piece.
# print "train_classes_to_feed = ", train_classes_to_feed
# print "train_fitted_values = ", train_fitted_values
# print "type(train_classes_to_feed) = ", type(train_classes_to_feed)
# print "type(train_fitted_values) = ", type(train_fitted_values)
# print "fitted_values.T.squeeze() = ", fitted_values.T.squeeze()
# Converting observed and predicted into pd.series so that we can join them.
train_fitted_values_series = pd.Series(train_fitted_values, index=train_classes_to_feed.index )
train_classes_to_feed_series = pd.Series(train_classes_to_feed, index=train_classes_to_feed.index )
# Debugging piece
# print "train_fitted_values_series = ", train_fitted_values_series
# print "train_classes_to_feed_series = ", train_classes_to_feed_series
# Combining results into the data_frame so that it can be exported.
classification_df = pd.DataFrame( {'Group_Observed': train_classes_to_feed_series ,
'Group_Predicted': train_fitted_values_series } )
# Debugging piece
# print classification_df
    # Output the classification into the tsv file.
classification_df.to_csv(args.outClassification, index='sampleID', sep='\t')
    # Compute the percentage of correctly classified samples (classification accuracy).
    classification_accuracy_percent = 100 * sum( classification_df['Group_Observed'] == classification_df['Group_Predicted'] )/classification_df.shape[0]
    classification_accuracy_percent_string = str( classification_accuracy_percent ) + ' Percent'
    os.system("echo %s > %s"%( classification_accuracy_percent_string, args.outClassificationAccuracy ) )
    # Get the predicted values of the SVM for the target data set.
    target_fitted_values = svm_model.predict( target_data_to_feed )
    # Debugging piece.
# print "target_classes_to_feed = ", target_classes_to_feed
# print "target_fitted_values = ", target_fitted_values
# print "type(target_classes_to_feed) = ", type(target_classes_to_feed)
# print "type(target_fitted_values) = ", type(target_fitted_values)
# print "fitted_values.T.squeeze() = ", fitted_values.T.squeeze()
# Converting observed and predicted into pd.series so that we can join them.
target_fitted_values_series = pd.Series(target_fitted_values, index=target_classes_to_feed.index )
target_classes_to_feed_series = pd.Series(target_classes_to_feed, index=target_classes_to_feed.index )
# Debugging piece
# print "target_fitted_values_series = ", target_fitted_values_series
# print "target_classes_to_feed_series = ", target_classes_to_feed_series
# Combining results into the data_frame so that it can be exported.
prediction_df = pd.DataFrame( {'Group_Observed': target_classes_to_feed_series ,
'Group_Predicted': target_fitted_values_series } )
# Debugging piece
# print prediction_df
    # Output the prediction into the tsv file.
prediction_df.to_csv(args.outPrediction, index='sampleID', sep='\t')
    # Compute the percentage of correctly predicted samples (prediction accuracy).
    prediction_accuracy_percent = 100 * sum( prediction_df['Group_Observed'] == prediction_df['Group_Predicted'] )/prediction_df.shape[0]
    prediction_accuracy_percent_string = str( prediction_accuracy_percent ) + ' Percent'
    os.system("echo %s > %s"%( prediction_accuracy_percent_string, args.outPredictionAccuracy ) )
# Finishing script
logger.info("Script Complete!")
if __name__ == '__main__':
args = getOptions()
logger = logging.getLogger()
sl.setLogger(logger)
main(args)
|
mit
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/sklearn/datasets/mlcomp.py
|
289
|
3855
|
# Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw data to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
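# Hedged usage sketch; the environment path and dataset name below are assumptions,
# not data bundled with scikit-learn:
#   >>> import os
#   >>> os.environ['MLCOMP_DATASETS_HOME'] = '/path/to/mlcomp_datasets'
#   >>> bunch = load_mlcomp('20news-18828', 'train')
#   >>> bunch.filenames[:3], bunch.target[:3], bunch.target_names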
|
mit
|
oemof/reegis-hp
|
reegis_hp/de21/storages.py
|
3
|
2558
|
import os
import pandas as pd
import configuration as config
from shapely.geometry import Point
import powerplants as pp
import numpy as np
def lat_lon2point(df):
"""Create shapely point object of latitude and longitude."""
return Point(df['Wikipedia', 'longitude'], df['Wikipedia', 'latitude'])
def pumped_hydroelectric_storage(c):
phes_raw = pd.read_csv(os.path.join(c.paths['static'],
c.files['hydro_storages']),
header=[0, 1]).sort_index(1)
phes = phes_raw['dena'].copy()
# add geometry from wikipedia
phes_raw = phes_raw[phes_raw['Wikipedia', 'longitude'].notnull()]
phes['geom'] = (phes_raw.apply(lat_lon2point, axis=1))
# add energy from ZFES because dena values seem to be corrupted
phes['energy'] = phes_raw['ZFES', 'energy']
phes['name'] = phes_raw['ZFES', 'name']
# TODO: 0.75 should come from config file
phes['efficiency'] = phes['efficiency'].fillna(0.75)
# remove storages that do not have an entry for energy capacity
phes = phes[phes.energy.notnull()]
# create a GeoDataFrame with geom column
gphes = pp.create_geo_df(phes)
# Add column with region id
gphes = pp.add_spatial_name(
c, gphes, os.path.join(c.paths['geometry'],
c.files['region_polygons']),
'region', 'offshore')
# # Add column with coastdat id
# gphes = pp.add_spatial_name(
# c, gphes, os.path.join(c.paths['geometry'],
# c.files['coastdatgrid_polygons']),
# 'coastdat_id', 'offshore')
# copy results from GeoDataFrame to DataFrame and remove obsolete columns
phes['region'] = gphes['region']
del phes['geom']
del phes['name']
del phes['energy_inflow']
# create turbine and pump efficiency from overall efficiency (square root)
# multiply the efficiency with the capacity to group with "sum()"
phes['pump_eff'] = np.sqrt(phes.efficiency) * phes.pump
phes['turbine_eff'] = np.sqrt(phes.efficiency) * phes.turbine
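    # Multiplying by capacity before the groupby makes the division below a
    # capacity-weighted mean: e.g. pumps of 100 MW at 0.9 and 50 MW at 0.8
    # average to (100 * 0.9 + 50 * 0.8) / 150 ≈ 0.867.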
phes = phes.groupby('region').sum()
# divide by the capacity to get the efficiency and remove overall efficiency
phes['pump_eff'] = phes.pump_eff / phes.pump
phes['turbine_eff'] = phes.turbine_eff / phes.turbine
del phes['efficiency']
phes.to_csv(os.path.join(c.paths['storages'],
c.files['hydro_storages_de21']))
if __name__ == "__main__":
cfg = config.get_configuration()
pumped_hydroelectric_storage(cfg)
|
gpl-3.0
|
cloudera/ibis
|
ibis/backends/postgres/tests/test_functions.py
|
1
|
47290
|
import operator
import os
import string
import warnings
from datetime import date, datetime
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
from pytest import param
import ibis
import ibis.config as config
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
from ibis import literal as L
from ibis.expr.window import rows_with_max_lookback
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('psycopg2')
pytestmark = pytest.mark.postgres
@pytest.fixture
def guid(con):
name = ibis.util.guid()
try:
yield name
finally:
con.drop_table(name, force=True)
@pytest.fixture
def guid2(con):
name = ibis.util.guid()
try:
yield name
finally:
con.drop_table(name, force=True)
@pytest.mark.parametrize(
('left_func', 'right_func'),
[
param(
lambda t: t.double_col.cast('int8'),
lambda at: sa.cast(at.c.double_col, sa.SMALLINT),
id='double_to_int8',
),
param(
lambda t: t.double_col.cast('int16'),
lambda at: sa.cast(at.c.double_col, sa.SMALLINT),
id='double_to_int16',
),
param(
lambda t: t.string_col.cast('double'),
lambda at: sa.cast(
at.c.string_col, sa.dialects.postgresql.DOUBLE_PRECISION
),
id='string_to_double',
),
param(
lambda t: t.string_col.cast('float'),
lambda at: sa.cast(at.c.string_col, sa.REAL),
id='string_to_float',
),
param(
lambda t: t.string_col.cast('decimal'),
lambda at: sa.cast(at.c.string_col, sa.NUMERIC(9, 0)),
id='string_to_decimal_no_params',
),
param(
lambda t: t.string_col.cast('decimal(9, 3)'),
lambda at: sa.cast(at.c.string_col, sa.NUMERIC(9, 3)),
id='string_to_decimal_params',
),
],
)
def test_cast(alltypes, at, translate, left_func, right_func):
left = left_func(alltypes)
right = right_func(at)
assert str(translate(left).compile()) == str(right.compile())
def test_date_cast(alltypes, at, translate):
result = alltypes.date_string_col.cast('date')
expected = sa.cast(at.c.date_string_col, sa.DATE)
assert str(translate(result)) == str(expected)
@pytest.mark.parametrize(
'column',
[
'index',
'Unnamed: 0',
'id',
'bool_col',
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'date_string_col',
'string_col',
'timestamp_col',
'year',
'month',
],
)
def test_noop_cast(alltypes, at, translate, column):
col = alltypes[column]
result = col.cast(col.type())
expected = at.c[column]
assert result.equals(col)
assert str(translate(result)) == str(expected)
def test_timestamp_cast_noop(alltypes, at, translate):
# See GH #592
result1 = alltypes.timestamp_col.cast('timestamp')
result2 = alltypes.int_col.cast('timestamp')
assert isinstance(result1, ir.TimestampColumn)
assert isinstance(result2, ir.TimestampColumn)
expected1 = at.c.timestamp_col
expected2 = sa.func.timezone('UTC', sa.func.to_timestamp(at.c.int_col))
assert str(translate(result1)) == str(expected1)
assert str(translate(result2)) == str(expected2)
@pytest.mark.parametrize(
('func', 'expected'),
[
param(operator.methodcaller('year'), 2015, id='year'),
param(operator.methodcaller('month'), 9, id='month'),
param(operator.methodcaller('day'), 1, id='day'),
param(operator.methodcaller('hour'), 14, id='hour'),
param(operator.methodcaller('minute'), 48, id='minute'),
param(operator.methodcaller('second'), 5, id='second'),
param(operator.methodcaller('millisecond'), 359, id='millisecond'),
param(lambda x: x.day_of_week.index(), 1, id='day_of_week_index'),
param(
lambda x: x.day_of_week.full_name(),
'Tuesday',
id='day_of_week_full_name',
),
],
)
def test_simple_datetime_operations(con, func, expected, translate):
value = ibis.timestamp('2015-09-01 14:48:05.359')
assert con.execute(func(value)) == expected
@pytest.mark.parametrize(
'pattern',
[
# there could be pathological failure at midnight somewhere, but
# that's okay
'%Y%m%d %H',
# test quoting behavior
'DD BAR %w FOO "DD"',
'DD BAR %w FOO "D',
'DD BAR "%w" FOO "D',
'DD BAR "%d" FOO "D',
param(
'DD BAR "%c" FOO "D',
marks=pytest.mark.xfail(
condition=os.name == 'nt',
reason='Locale-specific format specs not available on Windows',
),
),
param(
'DD BAR "%x" FOO "D',
marks=pytest.mark.xfail(
condition=os.name == 'nt',
reason='Locale-specific format specs not available on Windows',
),
),
param(
'DD BAR "%X" FOO "D',
marks=pytest.mark.xfail(
condition=os.name == 'nt',
reason='Locale-specific format specs not available on Windows',
),
),
],
)
def test_strftime(con, pattern):
value = ibis.timestamp('2015-09-01 14:48:05.359')
raw_value = datetime(
year=2015,
month=9,
day=1,
hour=14,
minute=48,
second=5,
microsecond=359000,
)
assert con.execute(value.strftime(pattern)) == raw_value.strftime(pattern)
@pytest.mark.parametrize(
('func', 'left', 'right', 'expected'),
[
param(operator.add, L(3), L(4), 7, id='add'),
param(operator.sub, L(3), L(4), -1, id='sub'),
param(operator.mul, L(3), L(4), 12, id='mul'),
param(operator.truediv, L(12), L(4), 3, id='truediv_no_remainder'),
param(operator.pow, L(12), L(2), 144, id='pow'),
param(operator.mod, L(12), L(5), 2, id='mod'),
param(operator.truediv, L(7), L(2), 3.5, id='truediv_remainder'),
param(operator.floordiv, L(7), L(2), 3, id='floordiv'),
param(
lambda x, y: x.floordiv(y), L(7), 2, 3, id='floordiv_no_literal'
),
param(
lambda x, y: x.rfloordiv(y), L(2), 7, 3, id='rfloordiv_no_literal'
),
],
)
def test_binary_arithmetic(con, func, left, right, expected):
expr = func(left, right)
result = con.execute(expr)
assert result == expected
@pytest.mark.parametrize(
('value', 'expected'),
[
param(L('foo_bar'), 'text', id='text'),
param(L(5), 'integer', id='integer'),
param(ibis.NA, 'null', id='null'),
# TODO(phillipc): should this really be double?
param(L(1.2345), 'numeric', id='numeric'),
param(
L(
datetime(
2015,
9,
1,
hour=14,
minute=48,
second=5,
microsecond=359000,
)
),
'timestamp without time zone',
id='timestamp_without_time_zone',
),
param(L(date(2015, 9, 1)), 'date', id='date'),
],
)
def test_typeof(con, value, expected):
assert con.execute(value.typeof()) == expected
@pytest.mark.parametrize(('value', 'expected'), [(0, None), (5.5, 5.5)])
def test_nullifzero(con, value, expected):
assert con.execute(L(value).nullifzero()) == expected
@pytest.mark.parametrize(('value', 'expected'), [('foo_bar', 7), ('', 0)])
def test_string_length(con, value, expected):
assert con.execute(L(value).length()) == expected
@pytest.mark.parametrize(
('op', 'expected'),
[
param(operator.methodcaller('left', 3), 'foo', id='left'),
param(operator.methodcaller('right', 3), 'bar', id='right'),
param(operator.methodcaller('substr', 0, 3), 'foo', id='substr_0_3'),
        param(operator.methodcaller('substr', 4, 3), 'bar', id='substr_4_3'),
param(operator.methodcaller('substr', 1), 'oo_bar', id='substr_1'),
],
)
def test_string_substring(con, op, expected):
value = L('foo_bar')
assert con.execute(op(value)) == expected
@pytest.mark.parametrize(
('opname', 'expected'),
[('lstrip', 'foo '), ('rstrip', ' foo'), ('strip', 'foo')],
)
def test_string_strip(con, opname, expected):
op = operator.methodcaller(opname)
value = L(' foo ')
assert con.execute(op(value)) == expected
@pytest.mark.parametrize(
('opname', 'count', 'char', 'expected'),
[('lpad', 6, ' ', ' foo'), ('rpad', 6, ' ', 'foo ')],
)
def test_string_pad(con, opname, count, char, expected):
op = operator.methodcaller(opname, count, char)
value = L('foo')
assert con.execute(op(value)) == expected
def test_string_reverse(con):
assert con.execute(L('foo').reverse()) == 'oof'
def test_string_upper(con):
assert con.execute(L('foo').upper()) == 'FOO'
def test_string_lower(con):
assert con.execute(L('FOO').lower()) == 'foo'
@pytest.mark.parametrize(
('haystack', 'needle', 'expected'),
[
('foobar', 'bar', True),
('foobar', 'foo', True),
('foobar', 'baz', False),
('100%', '%', True),
('a_b_c', '_', True),
],
)
def test_string_contains(con, haystack, needle, expected):
value = L(haystack)
expr = value.contains(needle)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('value', 'expected'),
[('foo bar foo', 'Foo Bar Foo'), ('foobar Foo', 'Foobar Foo')],
)
def test_capitalize(con, value, expected):
assert con.execute(L(value).capitalize()) == expected
def test_repeat(con):
expr = L('bar ').repeat(3)
assert con.execute(expr) == 'bar bar bar '
def test_re_replace(con):
expr = L('fudge|||chocolate||candy').re_replace('\\|{2,3}', ', ')
assert con.execute(expr) == 'fudge, chocolate, candy'
def test_translate(con):
expr = L('faab').translate('a', 'b')
assert con.execute(expr) == 'fbbb'
@pytest.mark.parametrize(
('raw_value', 'expected'), [('a', 0), ('b', 1), ('d', -1), (None, 3)]
)
def test_find_in_set(con, raw_value, expected):
value = L(raw_value, dt.string)
haystack = ['a', 'b', 'c', None]
expr = value.find_in_set(haystack)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('raw_value', 'opname', 'expected'),
[
(None, 'isnull', True),
(1, 'isnull', False),
(None, 'notnull', False),
(1, 'notnull', True),
],
)
def test_isnull_notnull(con, raw_value, opname, expected):
lit = L(raw_value)
op = operator.methodcaller(opname)
expr = op(lit)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(L('foobar').find('bar'), 3, id='find_pos'),
param(L('foobar').find('baz'), -1, id='find_neg'),
param(L('foobar').like('%bar'), True, id='like_left_pattern'),
param(L('foobar').like('foo%'), True, id='like_right_pattern'),
param(L('foobar').like('%baz%'), False, id='like_both_sides_pattern'),
param(L('foobar').like(['%bar']), True, id='like_list_left_side'),
param(L('foobar').like(['foo%']), True, id='like_list_right_side'),
param(L('foobar').like(['%baz%']), False, id='like_list_both_sides'),
param(
L('foobar').like(['%bar', 'foo%']), True, id='like_list_multiple'
),
param(L('foobarfoo').replace('foo', 'H'), 'HbarH', id='replace'),
param(L('a').ascii_str(), ord('a'), id='ascii_str'),
],
)
def test_string_functions(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(L('abcd').re_search('[a-z]'), True, id='re_search_match'),
param(L('abcd').re_search(r'[\d]+'), False, id='re_search_no_match'),
param(
L('1222').re_search(r'[\d]+'), True, id='re_search_match_number'
),
],
)
def test_regexp(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(
L('abcd').re_extract('([a-z]+)', 0), 'abcd', id='re_extract_whole'
),
param(
L('abcd').re_extract('(ab)(cd)', 1), 'cd', id='re_extract_first'
),
# valid group number but no match => empty string
param(L('abcd').re_extract(r'(\d)', 0), '', id='re_extract_no_match'),
# match but not a valid group number => NULL
param(L('abcd').re_extract('abcd', 3), None, id='re_extract_match'),
],
)
def test_regexp_extract(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(ibis.NA.fillna(5), 5, id='filled'),
param(L(5).fillna(10), 5, id='not_filled'),
param(L(5).nullif(5), None, id='nullif_null'),
param(L(10).nullif(5), 10, id='nullif_not_null'),
],
)
def test_fillna_nullif(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(ibis.coalesce(5, None, 4), 5, id='first'),
param(ibis.coalesce(ibis.NA, 4, ibis.NA), 4, id='second'),
param(ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14, id='third'),
],
)
def test_coalesce(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(ibis.coalesce(ibis.NA, ibis.NA), None, id='all_null'),
param(
ibis.coalesce(ibis.NA, ibis.NA, ibis.NA.cast('double')),
None,
id='all_nulls_with_one_cast',
),
param(
ibis.coalesce(
ibis.NA.cast('int8'),
ibis.NA.cast('int8'),
ibis.NA.cast('int8'),
),
None,
id='all_nulls_with_all_cast',
),
],
)
def test_coalesce_all_na(con, expr, expected):
assert con.execute(expr) == expected
def test_numeric_builtins_work(alltypes, df):
expr = alltypes.double_col.fillna(0)
result = expr.execute()
expected = df.double_col.fillna(0)
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('op', 'pandas_op'),
[
param(
lambda t: (t.double_col > 20).ifelse(10, -20),
lambda df: pd.Series(
np.where(df.double_col > 20, 10, -20), dtype='int8'
),
id='simple',
),
param(
lambda t: (t.double_col > 20).ifelse(10, -20).abs(),
lambda df: pd.Series(
np.where(df.double_col > 20, 10, -20), dtype='int8'
).abs(),
id='abs',
),
],
)
def test_ifelse(alltypes, df, op, pandas_op):
expr = op(alltypes)
result = expr.execute()
result.name = None
expected = pandas_op(df)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('func', 'pandas_func'),
[
# tier and histogram
param(
lambda d: d.bucket([0, 10, 25, 50, 100]),
lambda s: pd.cut(
s, [0, 10, 25, 50, 100], right=False, labels=False
),
id='include_over_false',
),
param(
lambda d: d.bucket([0, 10, 25, 50], include_over=True),
lambda s: pd.cut(
s, [0, 10, 25, 50, np.inf], right=False, labels=False
),
id='include_over_true',
),
param(
lambda d: d.bucket([0, 10, 25, 50], close_extreme=False),
lambda s: pd.cut(s, [0, 10, 25, 50], right=False, labels=False),
id='close_extreme_false',
),
param(
lambda d: d.bucket(
[0, 10, 25, 50], closed='right', close_extreme=False
),
lambda s: pd.cut(
s,
[0, 10, 25, 50],
include_lowest=False,
right=True,
labels=False,
),
id='closed_right',
),
param(
lambda d: d.bucket([10, 25, 50, 100], include_under=True),
lambda s: pd.cut(
s, [0, 10, 25, 50, 100], right=False, labels=False
),
id='include_under_true',
),
],
)
def test_bucket(alltypes, df, func, pandas_func):
expr = func(alltypes.double_col)
result = expr.execute()
expected = pandas_func(df.double_col).astype('category')
tm.assert_series_equal(result, expected, check_names=False)
def test_category_label(alltypes, df):
t = alltypes
d = t.double_col
bins = [0, 10, 25, 50, 100]
labels = ['a', 'b', 'c', 'd']
bucket = d.bucket(bins)
expr = bucket.label(labels)
result = expr.execute()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
result = pd.Series(pd.Categorical(result, ordered=True))
result.name = 'double_col'
expected = pd.cut(df.double_col, bins, labels=labels, right=False)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('distinct1', 'distinct2', 'expected1', 'expected2'),
[
(True, True, 'UNION', 'UNION'),
(True, False, 'UNION', 'UNION ALL'),
(False, True, 'UNION ALL', 'UNION'),
(False, False, 'UNION ALL', 'UNION ALL'),
],
)
def test_union_cte(alltypes, distinct1, distinct2, expected1, expected2):
t = alltypes
expr1 = t.group_by(t.string_col).aggregate(metric=t.double_col.sum())
expr2 = expr1.view()
expr3 = expr1.view()
expr = expr1.union(expr2, distinct=distinct1).union(
expr3, distinct=distinct2
)
result = '\n'.join(
map(
lambda line: line.rstrip(), # strip trailing whitespace
str(
expr.compile().compile(compile_kwargs={'literal_binds': True})
).splitlines(),
)
)
expected = """\
WITH anon_1 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM functional_alltypes AS t0 GROUP BY t0.string_col),
anon_2 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM functional_alltypes AS t0 GROUP BY t0.string_col),
anon_3 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM functional_alltypes AS t0 GROUP BY t0.string_col)
(SELECT anon_1.string_col, anon_1.metric
FROM anon_1 {} SELECT anon_2.string_col, anon_2.metric
FROM anon_2) {} SELECT anon_3.string_col, anon_3.metric
FROM anon_3""".format(
expected1, expected2
)
assert str(result) == expected
@pytest.mark.parametrize(
('func', 'pandas_func'),
[
param(
lambda t, cond: t.bool_col.count(),
lambda df, cond: df.bool_col.count(),
id='count',
),
param(
lambda t, cond: t.bool_col.any(),
lambda df, cond: df.bool_col.any(),
id='any',
),
param(
lambda t, cond: t.bool_col.all(),
lambda df, cond: df.bool_col.all(),
id='all',
),
param(
lambda t, cond: t.bool_col.notany(),
lambda df, cond: ~df.bool_col.any(),
id='notany',
),
param(
lambda t, cond: t.bool_col.notall(),
lambda df, cond: ~df.bool_col.all(),
id='notall',
),
param(
lambda t, cond: t.double_col.sum(),
lambda df, cond: df.double_col.sum(),
id='sum',
),
param(
lambda t, cond: t.double_col.mean(),
lambda df, cond: df.double_col.mean(),
id='mean',
),
param(
lambda t, cond: t.double_col.min(),
lambda df, cond: df.double_col.min(),
id='min',
),
param(
lambda t, cond: t.double_col.max(),
lambda df, cond: df.double_col.max(),
id='max',
),
param(
lambda t, cond: t.double_col.var(),
lambda df, cond: df.double_col.var(),
id='var',
),
param(
lambda t, cond: t.double_col.std(),
lambda df, cond: df.double_col.std(),
id='std',
),
param(
lambda t, cond: t.double_col.var(how='sample'),
lambda df, cond: df.double_col.var(ddof=1),
id='samp_var',
),
param(
lambda t, cond: t.double_col.std(how='pop'),
lambda df, cond: df.double_col.std(ddof=0),
id='pop_std',
),
param(
lambda t, cond: t.bool_col.count(where=cond),
lambda df, cond: df.bool_col[cond].count(),
id='count_where',
),
param(
lambda t, cond: t.double_col.sum(where=cond),
lambda df, cond: df.double_col[cond].sum(),
id='sum_where',
),
param(
lambda t, cond: t.double_col.mean(where=cond),
lambda df, cond: df.double_col[cond].mean(),
id='mean_where',
),
param(
lambda t, cond: t.double_col.min(where=cond),
lambda df, cond: df.double_col[cond].min(),
id='min_where',
),
param(
lambda t, cond: t.double_col.max(where=cond),
lambda df, cond: df.double_col[cond].max(),
id='max_where',
),
param(
lambda t, cond: t.double_col.var(where=cond),
lambda df, cond: df.double_col[cond].var(),
id='var_where',
),
param(
lambda t, cond: t.double_col.std(where=cond),
lambda df, cond: df.double_col[cond].std(),
id='std_where',
),
param(
lambda t, cond: t.double_col.var(where=cond, how='sample'),
lambda df, cond: df.double_col[cond].var(),
id='samp_var_where',
),
param(
lambda t, cond: t.double_col.std(where=cond, how='pop'),
lambda df, cond: df.double_col[cond].std(ddof=0),
id='pop_std_where',
),
],
)
def test_aggregations(alltypes, df, func, pandas_func):
table = alltypes.limit(100)
df = df.head(table.count().execute())
cond = table.string_col.isin(['1', '7'])
expr = func(table, cond)
result = expr.execute()
expected = pandas_func(df, cond.execute())
np.testing.assert_allclose(result, expected)
def test_not_contains(alltypes, df):
n = 100
table = alltypes.limit(n)
expr = table.string_col.notin(['1', '7'])
result = expr.execute()
expected = ~df.head(n).string_col.isin(['1', '7'])
tm.assert_series_equal(result, expected, check_names=False)
def test_group_concat(alltypes, df):
expr = alltypes.string_col.group_concat()
result = expr.execute()
expected = ','.join(df.string_col.dropna())
assert result == expected
def test_distinct_aggregates(alltypes, df):
expr = alltypes.limit(100).double_col.nunique()
result = expr.execute()
assert result == df.head(100).double_col.nunique()
def test_not_exists(alltypes, df):
t = alltypes
t2 = t.view()
expr = t[~((t.string_col == t2.string_col).any())]
result = expr.execute()
left, right = df, t2.execute()
expected = left[left.string_col != right.string_col]
tm.assert_frame_equal(
result, expected, check_index_type=False, check_dtype=False
)
def test_interactive_repr_shows_error(alltypes):
# #591. Doing this in PostgreSQL because so many built-in functions are
# not available
expr = alltypes.double_col.approx_median()
with config.option_context('interactive', True):
result = repr(expr)
assert 'no translation rule' in result.lower()
def test_subquery(alltypes, df):
t = alltypes
expr = (
t.mutate(d=t.double_col.fillna(0))
.limit(1000)
.group_by('string_col')
.size()
)
result = expr.execute().sort_values('string_col').reset_index(drop=True)
expected = (
df.assign(d=df.double_col.fillna(0))
.head(1000)
.groupby('string_col')
.string_col.count()
.reset_index(name='count')
.sort_values('string_col')
.reset_index(drop=True)
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('func', ['mean', 'sum', 'min', 'max'])
def test_simple_window(alltypes, func, df):
t = alltypes
f = getattr(t.double_col, func)
df_f = getattr(df.double_col, func)
result = (
t.projection([(t.double_col - f()).name('double_col')])
.execute()
.double_col
)
expected = df.double_col - df_f()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['mean', 'sum', 'min', 'max'])
def test_rolling_window(alltypes, func, df):
t = alltypes
df = (
df[['double_col', 'timestamp_col']]
.sort_values('timestamp_col')
.reset_index(drop=True)
)
window = ibis.window(order_by=t.timestamp_col, preceding=6, following=0)
f = getattr(t.double_col, func)
df_f = getattr(df.double_col.rolling(7, min_periods=0), func)
result = (
t.projection([f().over(window).name('double_col')])
.execute()
.double_col
)
expected = df_f()
tm.assert_series_equal(result, expected)
def test_rolling_window_with_mlb(alltypes):
t = alltypes
window = ibis.trailing_window(
preceding=rows_with_max_lookback(3, ibis.interval(days=5)),
order_by=t.timestamp_col,
)
expr = t['double_col'].sum().over(window)
with pytest.raises(NotImplementedError):
expr.execute()
@pytest.mark.parametrize('func', ['mean', 'sum', 'min', 'max'])
def test_partitioned_window(alltypes, func, df):
t = alltypes
window = ibis.window(
group_by=t.string_col,
order_by=t.timestamp_col,
preceding=6,
following=0,
)
def roller(func):
def rolled(df):
torder = df.sort_values('timestamp_col')
rolling = torder.double_col.rolling(7, min_periods=0)
return getattr(rolling, func)()
return rolled
f = getattr(t.double_col, func)
expr = f().over(window).name('double_col')
result = t.projection([expr]).execute().double_col
expected = (
df.groupby('string_col').apply(roller(func)).reset_index(drop=True)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['sum', 'min', 'max'])
def test_cumulative_simple_window(alltypes, func, df):
t = alltypes
f = getattr(t.double_col, func)
col = t.double_col - f().over(ibis.cumulative_window())
expr = t.projection([col.name('double_col')])
result = expr.execute().double_col
expected = df.double_col - getattr(df.double_col, 'cum%s' % func)()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['sum', 'min', 'max'])
def test_cumulative_partitioned_window(alltypes, func, df):
t = alltypes
df = df.sort_values('string_col').reset_index(drop=True)
window = ibis.cumulative_window(group_by=t.string_col)
f = getattr(t.double_col, func)
expr = t.projection([(t.double_col - f().over(window)).name('double_col')])
result = expr.execute().double_col
expected = df.groupby(df.string_col).double_col.transform(
lambda c: c - getattr(c, 'cum%s' % func)()
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['sum', 'min', 'max'])
def test_cumulative_ordered_window(alltypes, func, df):
t = alltypes
df = df.sort_values('timestamp_col').reset_index(drop=True)
window = ibis.cumulative_window(order_by=t.timestamp_col)
f = getattr(t.double_col, func)
expr = t.projection([(t.double_col - f().over(window)).name('double_col')])
result = expr.execute().double_col
expected = df.double_col - getattr(df.double_col, 'cum%s' % func)()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['sum', 'min', 'max'])
def test_cumulative_partitioned_ordered_window(alltypes, func, df):
t = alltypes
df = df.sort_values(['string_col', 'timestamp_col']).reset_index(drop=True)
window = ibis.cumulative_window(
order_by=t.timestamp_col, group_by=t.string_col
)
f = getattr(t.double_col, func)
expr = t.projection([(t.double_col - f().over(window)).name('double_col')])
result = expr.execute().double_col
method = operator.methodcaller('cum{}'.format(func))
expected = df.groupby(df.string_col).double_col.transform(
lambda c: c - method(c)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(('func', 'shift_amount'), [('lead', -1), ('lag', 1)])
def test_analytic_shift_functions(alltypes, df, func, shift_amount):
method = getattr(alltypes.double_col, func)
expr = method(1)
result = expr.execute().rename('double_col')
expected = df.double_col.shift(shift_amount)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('func', 'expected_index'), [('first', -1), ('last', 0)]
)
def test_first_last_value(alltypes, df, func, expected_index):
col = alltypes.sort_by(ibis.desc(alltypes.string_col)).double_col
method = getattr(col, func)
expr = method()
result = expr.execute().rename('double_col')
expected = pd.Series(
df.double_col.iloc[expected_index],
index=pd.RangeIndex(len(df)),
name='double_col',
)
tm.assert_series_equal(result, expected)
def test_null_column(alltypes):
t = alltypes
nrows = t.count().execute()
expr = t.mutate(na_column=ibis.NA).na_column
result = expr.execute()
tm.assert_series_equal(result, pd.Series([None] * nrows, name='na_column'))
def test_null_column_union(alltypes, df):
t = alltypes
s = alltypes[['double_col']].mutate(string_col=ibis.NA.cast('string'))
expr = t[['double_col', 'string_col']].union(s)
result = expr.execute()
nrows = t.count().execute()
expected = pd.concat(
[
df[['double_col', 'string_col']],
pd.concat(
[
df[['double_col']],
pd.DataFrame({'string_col': [None] * nrows}),
],
axis=1,
),
],
axis=0,
ignore_index=True,
)
tm.assert_frame_equal(result, expected)
def test_window_with_arithmetic(alltypes, df):
t = alltypes
w = ibis.window(order_by=t.timestamp_col)
expr = t.mutate(new_col=ibis.row_number().over(w) / 2)
df = (
df[['timestamp_col']]
.sort_values('timestamp_col')
.reset_index(drop=True)
)
expected = df.assign(new_col=[x / 2.0 for x in range(len(df))])
result = expr['timestamp_col', 'new_col'].execute()
tm.assert_frame_equal(result, expected)
def test_anonymous_aggregate(alltypes, df):
t = alltypes
expr = t[t.double_col > t.double_col.mean()]
result = expr.execute()
expected = df[df.double_col > df.double_col.mean()].reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.fixture
def array_types(con):
return con.table('array_types')
def test_array_length(array_types):
expr = array_types.projection(
[
array_types.x.length().name('x_length'),
array_types.y.length().name('y_length'),
array_types.z.length().name('z_length'),
]
)
result = expr.execute()
expected = pd.DataFrame(
{
'x_length': [3, 2, 2, 3, 3, 4],
'y_length': [3, 2, 2, 3, 3, 4],
'z_length': [3, 2, 2, 0, None, 4],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
('column', 'value_type'),
[('x', dt.int64), ('y', dt.string), ('z', dt.double)],
)
def test_array_schema(array_types, column, value_type):
assert array_types[column].type() == dt.Array(value_type)
def test_array_collect(array_types):
expr = array_types.group_by(array_types.grouper).aggregate(
collected=lambda t: t.scalar_column.collect()
)
result = expr.execute().sort_values('grouper').reset_index(drop=True)
expected = pd.DataFrame(
{
'grouper': list('abc'),
'collected': [[1.0, 2.0, 3.0], [4.0, 5.0], [6.0]],
}
)[['grouper', 'collected']]
tm.assert_frame_equal(result, expected, check_column_type=False)
@pytest.mark.parametrize(
['start', 'stop'],
[
(1, 3),
(1, 1),
(2, 3),
(2, 5),
(None, 3),
(None, None),
(3, None),
# negative slices are not supported
param(
-3,
None,
marks=pytest.mark.xfail(
raises=ValueError, reason='Negative slicing not supported'
),
),
param(
None,
-3,
marks=pytest.mark.xfail(
raises=ValueError, reason='Negative slicing not supported'
),
),
param(
-3,
-1,
marks=pytest.mark.xfail(
raises=ValueError, reason='Negative slicing not supported'
),
),
param(
-3,
-1,
marks=pytest.mark.xfail(
raises=ValueError, reason='Negative slicing not supported'
),
),
],
)
def test_array_slice(array_types, start, stop):
expr = array_types[array_types.y[start:stop].name('sliced')]
result = expr.execute()
expected = pd.DataFrame(
{'sliced': array_types.y.execute().map(lambda x: x[start:stop])}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('index', [1, 3, 4, 11])
def test_array_index(array_types, index):
expr = array_types[array_types.y[index].name('indexed')]
result = expr.execute()
expected = pd.DataFrame(
{
'indexed': array_types.y.execute().map(
lambda x: x[index] if index < len(x) else None
)
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('n', [1, 3, 4, 7, -2])
@pytest.mark.parametrize(
'mul',
[
param(lambda x, n: x * n, id='mul'),
param(lambda x, n: n * x, id='rmul'),
],
)
def test_array_repeat(array_types, n, mul):
expr = array_types.projection([mul(array_types.x, n).name('repeated')])
result = expr.execute()
expected = pd.DataFrame(
{'repeated': array_types.x.execute().map(lambda x, n=n: mul(x, n))}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'catop',
[
param(lambda x, y: x + y, id='concat'),
param(lambda x, y: y + x, id='rconcat'),
],
)
def test_array_concat(array_types, catop):
t = array_types
x, y = t.x.cast('array<string>').name('x'), t.y
expr = t.projection([catop(x, y).name('catted')])
result = expr.execute()
tuples = t.projection([x, y]).execute().itertuples(index=False)
expected = pd.DataFrame({'catted': [catop(i, j) for i, j in tuples]})
tm.assert_frame_equal(result, expected)
def test_array_concat_mixed_types(array_types):
with pytest.raises(TypeError):
array_types.x + array_types.x.cast('array<double>')
@pytest.fixture
def t(con, guid):
con.raw_sql(
"""
CREATE TABLE "{}" (
id SERIAL PRIMARY KEY,
name TEXT
)
""".format(
guid
)
)
return con.table(guid)
@pytest.fixture
def s(con, t, guid, guid2):
assert t.op().name == guid
assert t.op().name != guid2
con.raw_sql(
"""
CREATE TABLE "{}" (
id SERIAL PRIMARY KEY,
left_t_id INTEGER REFERENCES "{}",
cost DOUBLE PRECISION
)
""".format(
guid2, guid
)
)
return con.table(guid2)
@pytest.fixture
def trunc(con, guid):
con.raw_sql(
"""
CREATE TABLE "{}" (
id SERIAL PRIMARY KEY,
name TEXT
)
""".format(
guid
)
)
con.raw_sql(
"""INSERT INTO "{}" (name) VALUES ('a'), ('b'), ('c')""".format(guid)
)
return con.table(guid)
def test_semi_join(t, s):
t_a, s_a = t.op().sqla_table.alias('t0'), s.op().sqla_table.alias('t1')
expr = t.semi_join(s, t.id == s.id)
result = expr.compile().compile(compile_kwargs={'literal_binds': True})
base = sa.select([t_a.c.id, t_a.c.name]).where(
sa.exists(sa.select([1]).where(t_a.c.id == s_a.c.id))
)
expected = sa.select([base.c.id, base.c.name])
assert str(result) == str(expected)
def test_anti_join(t, s):
t_a, s_a = t.op().sqla_table.alias('t0'), s.op().sqla_table.alias('t1')
expr = t.anti_join(s, t.id == s.id)
result = expr.compile().compile(compile_kwargs={'literal_binds': True})
expected = sa.select([sa.column('id'), sa.column('name')]).select_from(
sa.select([t_a.c.id, t_a.c.name]).where(
~(sa.exists(sa.select([1]).where(t_a.c.id == s_a.c.id)))
)
)
assert str(result) == str(expected)
def test_create_table_from_expr(con, trunc, guid2):
con.create_table(guid2, expr=trunc)
t = con.table(guid2)
assert list(t.name.execute()) == list('abc')
def test_truncate_table(con, trunc):
assert list(trunc.name.execute()) == list('abc')
con.truncate_table(trunc.op().name)
assert not len(trunc.execute())
def test_head(con):
t = con.table('functional_alltypes')
result = t.head().execute()
expected = t.limit(5).execute()
tm.assert_frame_equal(result, expected)
def test_identical_to(con, df):
# TODO: abstract this testing logic out into parameterized fixtures
t = con.table('functional_alltypes')
dt = df[['tinyint_col', 'double_col']]
expr = t.tinyint_col.identical_to(t.double_col)
result = expr.execute()
expected = (dt.tinyint_col.isnull() & dt.double_col.isnull()) | (
dt.tinyint_col == dt.double_col
)
expected.name = result.name
tm.assert_series_equal(result, expected)
def test_rank(con):
t = con.table('functional_alltypes')
expr = t.double_col.rank()
sqla_expr = expr.compile()
result = str(sqla_expr.compile(compile_kwargs={'literal_binds': True}))
expected = (
"SELECT rank() OVER (ORDER BY t0.double_col) - 1 AS tmp \n"
"FROM functional_alltypes AS t0"
)
assert result == expected
def test_percent_rank(con):
t = con.table('functional_alltypes')
expr = t.double_col.percent_rank()
sqla_expr = expr.compile()
result = str(sqla_expr.compile(compile_kwargs={'literal_binds': True}))
expected = (
"SELECT percent_rank() OVER (ORDER BY t0.double_col) AS "
"tmp \nFROM functional_alltypes AS t0"
)
assert result == expected
def test_ntile(con):
t = con.table('functional_alltypes')
expr = t.double_col.ntile(7)
sqla_expr = expr.compile()
result = str(sqla_expr.compile(compile_kwargs={'literal_binds': True}))
expected = (
"SELECT ntile(7) OVER (ORDER BY t0.double_col) - 1 AS tmp \n"
"FROM functional_alltypes AS t0"
)
assert result == expected
@pytest.mark.parametrize('opname', ['invert', 'neg'])
def test_not_and_negate_bool(con, opname, df):
op = getattr(operator, opname)
t = con.table('functional_alltypes').limit(10)
expr = t.projection([op(t.bool_col).name('bool_col')])
result = expr.execute().bool_col
expected = op(df.head(10).bool_col)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'field',
[
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'year',
'month',
],
)
def test_negate_non_boolean(con, field, df):
t = con.table('functional_alltypes').limit(10)
expr = t.projection([(-t[field]).name(field)])
result = expr.execute()[field]
expected = -df.head(10)[field]
tm.assert_series_equal(result, expected)
def test_negate_boolean(con, df):
t = con.table('functional_alltypes').limit(10)
expr = t.projection([(-t.bool_col).name('bool_col')])
result = expr.execute().bool_col
expected = -df.head(10).bool_col
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('opname', 'expected'),
[
('year', {2009, 2010}),
('month', set(range(1, 13))),
('day', set(range(1, 32))),
],
)
def test_date_extract_field(db, opname, expected):
op = operator.methodcaller(opname)
t = db.functional_alltypes
expr = op(t.timestamp_col.cast('date')).distinct()
result = expr.execute().astype(int)
assert set(result) == expected
@pytest.mark.parametrize('opname', ['sum', 'mean', 'min', 'max', 'std', 'var'])
def test_boolean_reduction(alltypes, opname, df):
op = operator.methodcaller(opname)
expr = op(alltypes.bool_col)
result = expr.execute()
assert result == op(df.bool_col)
def test_boolean_summary(alltypes):
expr = alltypes.bool_col.summary()
result = expr.execute()
expected = pd.DataFrame(
[[7300, 0, 0, 1, 3650, 0.5, 2]],
columns=[
'count',
'nulls',
'min',
'max',
'sum',
'mean',
'approx_nunique',
],
)
type_conversions = {
'count': 'int64',
'nulls': 'int64',
'min': 'bool',
'max': 'bool',
'sum': 'int64',
'approx_nunique': 'int64',
}
for k, v in type_conversions.items():
expected[k] = expected[k].astype(v)
tm.assert_frame_equal(result, expected)
def test_timestamp_with_timezone(con):
t = con.table('tzone')
result = t.ts.execute()
assert str(result.dtype.tz)
@pytest.fixture(
params=[
None,
'UTC',
'America/New_York',
'America/Los_Angeles',
'Europe/Paris',
'Chile/Continental',
'Asia/Tel_Aviv',
'Asia/Tokyo',
'Africa/Nairobi',
'Australia/Sydney',
]
)
def tz(request):
return request.param
@pytest.fixture
def tzone_compute(con, guid, tz):
schema = ibis.schema(
[('ts', dt.Timestamp(tz)), ('b', 'double'), ('c', 'string')]
)
con.create_table(guid, schema=schema)
t = con.table(guid)
n = 10
df = pd.DataFrame(
{
'ts': pd.date_range('2017-04-01', periods=n, tz=tz).values,
'b': np.arange(n).astype('float64'),
'c': list(string.ascii_lowercase[:n]),
}
)
df.to_sql(
guid,
con.con,
index=False,
if_exists='append',
dtype={'ts': sa.TIMESTAMP(timezone=True), 'b': sa.FLOAT, 'c': sa.TEXT},
)
try:
yield t
finally:
con.drop_table(guid)
assert guid not in con.list_tables()
def test_ts_timezone_is_preserved(tzone_compute, tz):
assert dt.Timestamp(tz).equals(tzone_compute.ts.type())
def test_timestamp_with_timezone_select(tzone_compute, tz):
ts = tzone_compute.ts.execute()
assert str(getattr(ts.dtype, 'tz', None)) == str(tz)
def test_timestamp_type_accepts_all_timezones(con):
assert all(
dt.Timestamp(row.name).timezone == row.name
for row in con.con.execute('SELECT name FROM pg_timezone_names')
)
@pytest.mark.parametrize(
('left', 'right', 'type'),
[
param(L('2017-04-01'), date(2017, 4, 2), dt.date, id='ibis_date'),
param(date(2017, 4, 2), L('2017-04-01'), dt.date, id='python_date'),
param(
L('2017-04-01 01:02:33'),
datetime(2017, 4, 1, 1, 3, 34),
dt.timestamp,
id='ibis_timestamp',
),
param(
datetime(2017, 4, 1, 1, 3, 34),
L('2017-04-01 01:02:33'),
dt.timestamp,
id='python_datetime',
),
],
)
@pytest.mark.parametrize('opname', ['eq', 'ne', 'lt', 'le', 'gt', 'ge'])
def test_string_temporal_compare(con, opname, left, right, type):
op = getattr(operator, opname)
expr = op(left, right)
result = con.execute(expr)
left_raw = con.execute(L(left).cast(type))
right_raw = con.execute(L(right).cast(type))
expected = op(left_raw, right_raw)
assert result == expected
@pytest.mark.parametrize(
('left', 'right'),
[
param(L('2017-03-31').cast(dt.date), date(2017, 4, 2), id='ibis_date'),
param(
date(2017, 3, 31), L('2017-04-02').cast(dt.date), id='python_date'
),
param(
L('2017-03-31 00:02:33').cast(dt.timestamp),
datetime(2017, 4, 1, 1, 3, 34),
id='ibis_timestamp',
),
param(
datetime(2017, 3, 31, 0, 2, 33),
L('2017-04-01 01:03:34').cast(dt.timestamp),
id='python_datetime',
),
],
)
@pytest.mark.parametrize(
'op',
[
param(
lambda left, right: ibis.timestamp('2017-04-01 00:02:34').between(
left, right
),
id='timestamp',
),
param(
lambda left, right: (
ibis.timestamp('2017-04-01').cast(dt.date).between(left, right)
),
id='date',
),
],
)
def test_string_temporal_compare_between(con, op, left, right):
expr = op(left, right)
result = con.execute(expr)
assert isinstance(result, (bool, np.bool_))
assert result
def test_scalar_parameter(con):
start = ibis.param(dt.date)
end = ibis.param(dt.date)
t = con.table('functional_alltypes')
col = t.date_string_col.cast('date')
expr = col.between(start, end)
start_string, end_string = '2009-03-01', '2010-07-03'
result = expr.execute(params={start: start_string, end: end_string})
expected = col.between(start_string, end_string).execute()
tm.assert_series_equal(result, expected)
def test_string_to_binary_cast(con):
t = con.table('functional_alltypes').limit(10)
expr = t.string_col.cast('binary')
result = expr.execute()
sql_string = (
"SELECT decode(string_col, 'escape') AS tmp "
"FROM functional_alltypes LIMIT 10"
)
raw_data = [row[0][0] for row in con.raw_sql(sql_string).fetchall()]
expected = pd.Series(raw_data, name='tmp')
tm.assert_series_equal(result, expected)
def test_string_to_binary_round_trip(con):
t = con.table('functional_alltypes').limit(10)
expr = t.string_col.cast('binary').cast('string')
result = expr.execute()
sql_string = (
"SELECT encode(decode(string_col, 'escape'), 'escape') AS tmp "
"FROM functional_alltypes LIMIT 10"
)
expected = pd.Series(
[row[0][0] for row in con.raw_sql(sql_string).fetchall()], name='tmp'
)
tm.assert_series_equal(result, expected)
|
apache-2.0
|
celiafish/VisTrails
|
setup.py
|
2
|
3886
|
import os
from setuptools import setup
os.chdir(os.path.abspath(os.path.dirname(__file__)))
packages = []
for rootdir, dirs, files in os.walk('vistrails'):
if '__init__.py' in files:
packages.append(rootdir.replace('\\', '.').replace('/', '.'))
def list_files(d, root):
files = []
for e in os.listdir(os.path.join(root, d)):
if os.path.isdir(os.path.join(root, d, e)):
files.extend(list_files('%s/%s' % (d, e), root))
elif not e.endswith('.pyc'):
files.append('%s/%s' % (d, e))
return files
package_data = {
'vistrails.core.collection': ['schema.sql', 'test.db'],
'vistrails.core': list_files('resources', 'vistrails/core'),
'vistrails.db': ['specs/all.xml'],
'vistrails.gui': list_files('resources/images', 'vistrails/gui') + ['resources/vistrails-mime.xml'],
'vistrails.packages.analytics': ['*.vt'], # FIXME : what is this?
'vistrails.packages.CLTools': ['icons/*.png', 'test_files/*'],
'vistrails.packages.persistence': ['schema.sql'],
'vistrails.packages.tabledata': ['test_files/*'],
'vistrails.tests': list_files('resources', 'vistrails/tests'),
}
for version in os.listdir('vistrails/db/versions'):
if not version.startswith('v'):
continue
package_data['vistrails.db.versions.%s' % version] = [
'schemas/sql/vistrails.sql',
'schemas/sql/vistrails_drop.sql',
'schemas/xml/log.xsd',
'schemas/xml/vistrail.xsd',
'schemas/xml/vtlink.xsd',
'schemas/xml/workflow.xsd',
'specs/all.xml',
]
description = """
VisTrails is an open-source data analysis and visualization tool. It provides a comprehensive provenance infrastructure that maintains detailed history information about the steps followed and data derived in the course of an exploratory task: VisTrails maintains provenance of data products, of the computational processes that derive these products and their executions.
For more information, take a look at the `documentation <http://www.vistrails.org/index.php/Documentation>`_, the `users guide <http://www.vistrails.org/usersguide/v2.0/html/>`_, or our `publications <http://www.vistrails.org/index.php/Publications,_Tutorials_and_Presentations>`_.
Binary releases are available on our `download <http://www.vistrails.org/index.php/Downloads>`_ page. To report bugs, please use the github `issue tracker <https://github.com/VisTrails/VisTrails/issues>`_, after checking our `FAQ <http://www.vistrails.org/index.php/FAQ>`_ for known issues.
Homepage: http://www.vistrails.org
Who we are: http://www.vistrails.org/index.php/People
"""
setup(name='vistrails',
version='2.2',
packages=packages,
package_data=package_data,
entry_points={
'console_scripts': [
'vistrails = vistrails.run:main']},
zip_safe=False,
install_requires=[
# 'PyQt<5.0',
'numpy',
'scipy',
'certifi',
'backports.ssl_match_hostname'],
description='Data analysis and visualization tool',
author="New York University",
author_email='[email protected]',
url='http://www.vistrails.org/',
long_description=description,
license='BSD',
keywords=['vistrails', 'provenance', 'visualization', 'vtk', 'nyu',
'matplotlib', ],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2 :: Only',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Visualization'])
|
bsd-3-clause
|
volodymyrss/3ML
|
threeML/utils/data_builders/time_series_builder.py
|
1
|
28927
|
import numpy as np
from threeML.utils.time_series.time_series import TimeSeries
from threeML.io.file_utils import file_existing_and_readable
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.plugins.OGIPLike import OGIPLike
from threeML.plugins.OGIP.pha import PHAWrite
from threeML.plugins.spectrum.binned_spectrum import BinnedSpectrum, BinnedSpectrumWithDispersion
from threeML.utils.data_builders.fermi.gbm_data import GBMTTEFile, GBMCdata
from threeML.utils.data_builders.fermi.lat_data import LLEFile
from threeML.utils.time_series.event_list import EventListWithDeadTime, EventListWithLiveTime, EventList
from threeML.utils.time_series.binned_spectrum_series import BinnedSpectrumSeries
from threeML.plugins.OGIP.response import InstrumentResponse, InstrumentResponseSet, OGIPResponse
from threeML.plugins.SpectrumLike import SpectrumLike, NegativeBackground
from threeML.plugins.DispersionSpectrumLike import DispersionSpectrumLike
from threeML.utils.time_interval import TimeIntervalSet
from threeML.io.progress_bar import progress_bar
import copy
import re
import astropy.io.fits as fits
class BinningMethodError(RuntimeError):
pass
class TimeSeriesBuilder(object):
def __init__(self, name, time_series, response=None,
poly_order=-1, unbinned=True, verbose=True, restore_poly_fit=None):
"""
Class for handling generic time series data including binned and event list
series. Depending on the data, this class builds either a SpectrumLike or
        DispersionSpectrumLike plugin
        For specific instruments, use the TimeSeriesBuilder.from_*() classmethods
:param name: name for the plugin
:param time_series: a TimeSeries instance
        :param response: optional InstrumentResponse (or InstrumentResponseSet) instance
:param poly_order: the polynomial order to use for background fitting
:param unbinned: if the background should be fit unbinned
:param verbose: the verbosity switch
:param restore_poly_fit: file from which to read a prefitted background
"""
assert isinstance(time_series, TimeSeries), "must be a TimeSeries instance"
self._name = name
self._time_series = time_series # type: TimeSeries
# make sure we have a proper response
if response is not None:
            assert isinstance(response, (InstrumentResponse, InstrumentResponseSet)), \
                'Response must be an instance of InstrumentResponse or InstrumentResponseSet'
# deal with RSP weighting if need be
if isinstance(response, InstrumentResponseSet):
# we have a weighted response
self._rsp_is_weighted = True
self._weighted_rsp = response
# just get a dummy response for the moment
# it will be corrected when we set the interval
self._response = InstrumentResponse.create_dummy_response(response.ebounds,
response.monte_carlo_energies)
else:
self._rsp_is_weighted = False
self._weighted_rsp = None
self._response = response
self._verbose = verbose
self._active_interval = None
self._observed_spectrum = None
self._background_spectrum = None
self._time_series.poly_order = poly_order
self._default_unbinned = unbinned
# try and restore the poly fit if requested
if restore_poly_fit is not None:
if file_existing_and_readable(restore_poly_fit):
self._time_series.restore_fit(restore_poly_fit)
if verbose:
print('Successfully restored fit from %s'%restore_poly_fit)
# In theory this will automatically get the poly counts if a
# time interval already exists
#
# if self._response is None:
#
# self._background_spectrum = BinnedSpectrum.from_time_series(self._time_series, use_poly=True)
#
# else:
# self._background_spectrum = BinnedSpectrumWithDispersion.from_time_series(self._time_series,
# self._response,
# use_poly=True)
else:
custom_warnings.warn(
"Could not find saved background %s." % restore_poly_fit)
def _output(self):
pass
# super_out = super(EventListLike, self)._output()
# return super_out.append(self._time_series._output())
def __set_poly_order(self, value):
"""Background poly order setter """
self._time_series.poly_order = value
def ___set_poly_order(self, value):
""" Indirect poly order setter """
self.__set_poly_order(value)
def __get_poly_order(self):
""" Get poly order """
return self._time_series.poly_order
def ___get_poly_order(self):
""" Indirect poly order getter """
return self.__get_poly_order()
background_poly_order = property(___get_poly_order, ___set_poly_order,
doc="Get or set the background polynomial order")
def set_active_time_interval(self, *intervals, **kwargs):
"""
Set the time interval to be used during the analysis.
For now, only one interval can be selected. This may be
updated in the future to allow for self consistent time
resolved analysis.
Specified as 'tmin-tmax'. Intervals are in seconds. Example:
set_active_time_interval("0.0-10.0")
        which will set the time range 0-10 seconds.
:param options:
:param intervals:
:return:
"""
self._time_series.set_active_time_intervals(*intervals)
# extract a spectrum
if self._response is None:
self._observed_spectrum = BinnedSpectrum.from_time_series(self._time_series, use_poly=False)
else:
if self._rsp_is_weighted:
self._response = self._weighted_rsp.weight_by_counts(*self._time_series.time_intervals.to_string().split(','))
self._observed_spectrum = BinnedSpectrumWithDispersion.from_time_series(self._time_series, self._response,
use_poly=False)
self._active_interval = intervals
if self._time_series.poly_fit_exists:
if self._response is None:
self._background_spectrum = BinnedSpectrum.from_time_series(self._time_series, use_poly=True)
else:
self._background_spectrum = BinnedSpectrumWithDispersion.from_time_series(self._time_series,
self._response, use_poly=True)
self._tstart = self._time_series.time_intervals.absolute_start_time
self._tstop = self._time_series.time_intervals.absolute_stop_time
def set_background_interval(self, *intervals, **options):
"""
Set the time interval to fit the background.
Multiple intervals can be input as separate arguments
Specified as 'tmin-tmax'. Intervals are in seconds. Example:
setBackgroundInterval("-10.0-0.0","10.-15.")
        :param intervals: background intervals specified as 'tmin-tmax' strings
        :param options: 'unbinned' (bool) to override the default background fit mode
:return: none
"""
if 'unbinned' in options:
unbinned = options.pop('unbinned')
else:
unbinned = self._default_unbinned
self._time_series.set_polynomial_fit_interval(*intervals, unbinned=unbinned)
# In theory this will automatically get the poly counts if a
# time interval already exists
if self._active_interval is not None:
if self._response is None:
self._background_spectrum = BinnedSpectrum.from_time_series(self._time_series, use_poly=True)
else:
# we do not need to worry about the interval of the response if it is a set. only the ebounds are extracted here
self._background_spectrum = BinnedSpectrumWithDispersion.from_time_series(self._time_series, self._response,
use_poly=True)
def write_pha_from_binner(self, file_name, start=None, stop=None, overwrite=False):
"""
Write PHA fits files from the selected bins. If writing from an event list, the
bins are from create_time_bins. If using a pre-time binned time series, the bins are those
native to the data. Start and stop times can be used to control which bins are written to files
:param file_name: the file name of the output files
:param start: optional start time of the bins
:param stop: optional stop time of the bins
:param overwrite: if the fits files should be overwritten
:return: None
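        Example (hypothetical output name; assumes time bins were already created
        with create_time_bins):
            builder.create_time_bins(0., 10., method='constant', dt=1.)
            builder.write_pha_from_binner('my_grb', start=0., stop=10., overwrite=True)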
"""
# we simply create a bunch of dispersion plugins and convert them to OGIP
ogip_list = [OGIPLike.from_general_dispersion_spectrum(sl) for sl in self.to_spectrumlike(from_bins=True,
start=start,
stop=stop)]
# write out the PHAII file
pha_writer = PHAWrite(*ogip_list)
pha_writer.write(file_name, overwrite=overwrite)
def get_background_parameters(self):
"""
Returns a pandas DataFrame containing the background polynomial
coefficients for each channel.
"""
return self._time_series.get_poly_info()
def save_background(self, filename, overwrite=False):
"""
        Save the background fit to an HDF5 file. The filename does not need an extension.
        The file will be saved as <filename>_bkg.h5
:param filename: name of file to save
:param overwrite: to overwrite or not
:return:
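        Example (hypothetical file name):
            builder.save_background('my_grb_bkg', overwrite=True)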
"""
self._time_series.save_background(filename, overwrite)
def view_lightcurve(self, start=-10, stop=20., dt=1., use_binner=False):
# type: (float, float, float, bool) -> None
"""
        :param start: start time of the light curve view
        :param stop: stop time of the light curve view
        :param dt: time bin width used for the display
        :param use_binner: use the bins created with create_time_bins instead of a constant dt
"""
return self._time_series.view_lightcurve(start, stop, dt, use_binner)
@property
def tstart(self):
"""
:return: start time of the active interval
"""
return self._tstart
@property
def tstop(self):
"""
:return: stop time of the active interval
"""
return self._tstop
@property
def bins(self):
return self._time_series.bins
def read_bins(self, time_series_builder):
"""
Read the temporal bins from another *binned* TimeSeriesBuilder instance
and apply those bins to this instance
:param time_series_builder: *binned* time series builder to copy
:return:
"""
other_bins = time_series_builder.bins.bin_stack
self.create_time_bins(other_bins[:,0], other_bins[:,1], method='custom')
def create_time_bins(self, start, stop, method='constant', **options):
"""
        Create time bins from start to stop with a given method (constant, significance, bayesblocks, custom).
        Each method has required keywords specified in the parameters. Once created, this can be used as
        a JointLikelihoodSet generator, or as input for viewing the light curve.
:param start: start of the bins or array of start times for custom mode
:param stop: stop of the bins or array of stop times for custom mode
:param method: constant, significance, bayesblocks, custom
        :param dt: <constant method> delta time of the bins
:param sigma: <significance> sigma level of bins
:param min_counts: (optional) <significance> minimum number of counts per bin
:param p0: <bayesblocks> the chance probability of having the correct bin configuration.
:return:
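        Examples (hypothetical values; 'builder' is a TimeSeriesBuilder wrapping an
        event list):
            builder.create_time_bins(0., 50., method='constant', dt=2.)
            builder.create_time_bins(0., 50., method='significance', sigma=25)
            builder.create_time_bins([0., 5.], [5., 20.], method='custom')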
"""
assert isinstance(self._time_series, EventList), 'can only bin event lists currently'
# if 'use_energy_mask' in options:
#
# use_energy_mask = options.pop('use_energy_mask')
#
# else:
#
# use_energy_mask = False
if method == 'constant':
if 'dt' in options:
dt = float(options.pop('dt'))
else:
raise RuntimeError('constant bins requires the dt option set!')
self._time_series.bin_by_constant(start, stop, dt)
elif method == 'significance':
if 'sigma' in options:
sigma = options.pop('sigma')
else:
raise RuntimeError('significance bins require a sigma argument')
if 'min_counts' in options:
min_counts = options.pop('min_counts')
else:
min_counts = 10
# removed for now
# should we mask the data
# if use_energy_mask:
#
# mask = self._mask
#
# else:
#
# mask = None
self._time_series.bin_by_significance(start, stop, sigma=sigma, min_counts=min_counts, mask=None)
elif method == 'bayesblocks':
if 'p0' in options:
p0 = options.pop('p0')
else:
p0 = 0.1
if 'use_background' in options:
use_background = options.pop('use_background')
else:
use_background = False
self._time_series.bin_by_bayesian_blocks(start, stop, p0, use_background)
elif method == 'custom':
if type(start) is not list:
if type(start) is not np.ndarray:
                    raise RuntimeError('start must be an array in custom mode')
if type(stop) is not list:
if type(stop) is not np.ndarray:
                    raise RuntimeError('stop must be an array in custom mode')
assert len(start) == len(stop), 'must have equal number of start and stop times'
self._time_series.bin_by_custom(start, stop)
else:
            raise BinningMethodError("Only 'constant', 'significance', 'bayesblocks', or 'custom' method arguments are accepted.")
if self._verbose:
print('Created %d bins via %s'% (len(self._time_series.bins), method))
def to_spectrumlike(self, from_bins=False, start=None, stop=None, interval_name='_interval'):
"""
Create plugin(s) from either the current active selection or the time bins.
If creating from an event list, the
bins are from create_time_bins. If using a pre-time binned time series, the bins are those
native to the data. Start and stop times can be used to control which bins are used.
:param from_bins: choose to create plugins from the time bins
:param start: optional start time of the bins
:param stop: optional stop time of the bins
:return: SpectrumLike plugin(s)
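        Example (assumes an active interval or time bins already exist on this builder):
            single_plugin = builder.to_spectrumlike()
            binned_plugins = builder.to_spectrumlike(from_bins=True, start=0., stop=10.)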
"""
# this is for a single interval
if not from_bins:
assert self._observed_spectrum is not None, 'Must have selected an active time interval'
if self._response is None:
return SpectrumLike(name=self._name,
observation=self._observed_spectrum,
background=self._background_spectrum,
verbose=self._verbose)
else:
return DispersionSpectrumLike(name=self._name,
observation=self._observed_spectrum,
background=self._background_spectrum,
verbose=self._verbose)
else:
# this is for a set of intervals.
assert self._time_series.bins is not None, 'This time series does not have any bins!'
# save the original interval if there is one
old_interval = copy.copy(self._active_interval)
old_verbose = copy.copy(self._verbose)
# we will keep it quiet to keep from being annoying
self._verbose = False
list_of_speclikes = []
# get the bins from the time series
# for event lists, these are from created bins
            # for binned spectra sets, these are the native bins
these_bins = self._time_series.bins # type: TimeIntervalSet
if start is not None:
assert stop is not None, 'must specify a start AND a stop time'
            if stop is not None:
                assert start is not None, 'must specify a start AND a stop time'
these_bins = these_bins.containing_interval(start, stop, inner=False)
# loop through the intervals and create spec likes
with progress_bar(len(these_bins), title='Creating plugins') as p:
for i, interval in enumerate(these_bins):
self.set_active_time_interval(interval.to_string())
try:
if self._response is None:
sl = SpectrumLike(name="%s%s%d" % (self._name, interval_name, i),
observation=self._observed_spectrum,
background=self._background_spectrum,
verbose=self._verbose)
else:
sl = DispersionSpectrumLike(name="%s%s%d" % (self._name, interval_name, i),
observation=self._observed_spectrum,
background=self._background_spectrum,
verbose=self._verbose)
list_of_speclikes.append(sl)
except(NegativeBackground):
custom_warnings.warn('Something is wrong with interval %s. skipping.' % interval)
p.increase()
# restore the old interval
if old_interval is not None:
self.set_active_time_interval(*old_interval)
else:
self._active_interval = None
self._verbose = old_verbose
return list_of_speclikes
@classmethod
def from_gbm_tte(cls, name, tte_file, rsp_file, restore_background=None,
trigger_time=None,
poly_order=-1, unbinned=True, verbose=True):
"""
A plugin to natively bin, view, and handle Fermi GBM TTE data.
        A TTE event file is required as well as the associated response
Background selections are specified as
a comma separated string e.g. "-10-0,10-20"
Initial source selection is input as a string e.g. "0-5"
One can choose a background polynomial order by hand (up to 4th order)
or leave it as the default polyorder=-1 to decide by LRT test
:param name: name for your choosing
:param tte_file: GBM tte event file
:param rsp_file: Associated TTE CSPEC response file
:param trigger_time: trigger time if needed
:param poly_order: 0-4 or -1 for auto
:param unbinned: unbinned likelihood fit (bool)
:param verbose: verbose (bool)
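        Example (file names are hypothetical placeholders):
            nai3 = TimeSeriesBuilder.from_gbm_tte('n3',
                                                  'glg_tte_n3_placeholder.fit',
                                                  'glg_cspec_n3_placeholder.rsp2')
            nai3.set_background_interval('-10-0', '10-20')
            nai3.set_active_time_interval('0-5')
            plugin = nai3.to_spectrumlike()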
"""
# self._default_unbinned = unbinned
# Load the relevant information from the TTE file
gbm_tte_file = GBMTTEFile(tte_file)
# Set a trigger time if one has not been set
if trigger_time is not None:
gbm_tte_file.trigger_time = trigger_time
        # Create the event list
event_list = EventListWithDeadTime(arrival_times=gbm_tte_file.arrival_times - gbm_tte_file.trigger_time,
energies=gbm_tte_file.energies,
n_channels=gbm_tte_file.n_channels,
start_time=gbm_tte_file.tstart - gbm_tte_file.trigger_time,
stop_time=gbm_tte_file.tstop - gbm_tte_file.trigger_time,
dead_time=gbm_tte_file.deadtime,
first_channel=0,
instrument=gbm_tte_file.det_name,
mission=gbm_tte_file.mission,
verbose=verbose)
# we need to see if this is an RSP2
        test = re.match(r'^.*\.rsp2$', rsp_file)
# some GBM RSPs that are not marked RSP2 are in fact RSP2s
# we need to check
if test is None:
with fits.open(rsp_file) as f:
# there should only be a header, ebounds and one spec rsp extension
if len(f) > 3:
                    # make test a dummy value to trigger the RSP2 branch below
test = -1
custom_warnings.warn('The RSP file is marked as a single response but in fact has multiple matrices. We will treat it as an RSP2')
if test is not None:
rsp = InstrumentResponseSet.from_rsp2_file(rsp2_file=rsp_file,
counts_getter=event_list.counts_over_interval,
exposure_getter=event_list.exposure_over_interval,
reference_time=gbm_tte_file.trigger_time)
else:
rsp = OGIPResponse(rsp_file)
# pass to the super class
return cls(name,
event_list,
response=rsp,
poly_order=poly_order,
unbinned=unbinned,
verbose=verbose,
restore_poly_fit=restore_background)
@classmethod
def from_gbm_cspec_or_ctime(cls, name, cspec_or_ctime_file, rsp_file, restore_background=None,
trigger_time=None,
poly_order=-1, verbose=True):
"""
        A plugin to natively bin, view, and handle Fermi GBM CSPEC or CTIME data.
        A CSPEC or CTIME file is required as well as the associated response
        Background selections are specified as
        a comma separated string e.g. "-10-0,10-20"
        Initial source selection is input as a string e.g. "0-5"
        One can choose a background polynomial order by hand (up to 4th order)
        or leave it as the default polyorder=-1 to decide by LRT test
        :param name: name for your choosing
        :param cspec_or_ctime_file: GBM CSPEC or CTIME file
        :param rsp_file: associated response file
        :param trigger_time: trigger time if needed
        :param poly_order: 0-4 or -1 for auto
        :param verbose: verbose (bool)
"""
# self._default_unbinned = unbinned
        # Load the relevant information from the CSPEC or CTIME file
cdata = GBMCdata(cspec_or_ctime_file, rsp_file)
# Set a trigger time if one has not been set
if trigger_time is not None:
cdata.trigger_time = trigger_time
        # Create the binned spectrum series
event_list = BinnedSpectrumSeries(cdata.spectrum_set,
first_channel=0,
mission='Fermi',
instrument=cdata.det_name,
verbose=verbose)
# we need to see if this is an RSP2
        test = re.match(r'^.*\.rsp2$', rsp_file)
# some GBM RSPs that are not marked RSP2 are in fact RSP2s
# we need to check
if test is None:
with fits.open(rsp_file) as f:
# there should only be a header, ebounds and one spec rsp extension
if len(f) > 3:
                    # make test a dummy value to trigger the RSP2 branch below
test = -1
custom_warnings.warn(
'The RSP file is marked as a single response but in fact has multiple matrices. We will treat it as an RSP2')
if test is not None:
rsp = InstrumentResponseSet.from_rsp2_file(rsp2_file=rsp_file,
counts_getter=event_list.counts_over_interval,
exposure_getter=event_list.exposure_over_interval,
reference_time=cdata.trigger_time)
else:
rsp = OGIPResponse(rsp_file)
# pass to the super class
return cls(name,
event_list,
response=rsp,
poly_order=poly_order,
unbinned=False,
verbose=verbose,
restore_poly_fit=restore_background)
@classmethod
def from_lat_lle(cls, name, lle_file, ft2_file, rsp_file, restore_background=None,
trigger_time=None, poly_order=-1, unbinned=False, verbose=True):
"""
A plugin to natively bin, view, and handle Fermi LAT LLE data.
An LLE event file and FT2 (1 sec) are required as well as the associated response
Background selections are specified as
a comma separated string e.g. "-10-0,10-20"
Initial source selection is input as a string e.g. "0-5"
One can choose a background polynomial order by hand (up to 4th order)
or leave it as the default polyorder=-1 to decide by LRT test
:param name: name of the plugin
:param lle_file: lle event file
:param ft2_file: fermi FT2 file
:param rsp_file: lle response file
:param trigger_time: trigger time if needed
:param poly_order: 0-4 or -1 for auto
:param unbinned: unbinned likelihood fit (bool)
:param verbose: verbose (bool)
"""
lat_lle_file = LLEFile(lle_file, ft2_file, rsp_file)
if trigger_time is not None:
lat_lle_file.trigger_time = trigger_time
        # Mark channels below 30 MeV as bad
channel_30MeV = np.searchsorted(lat_lle_file.energy_edges[0], 30000.) - 1
native_quality = np.zeros(lat_lle_file.n_channels, dtype=int)
idx = np.arange(lat_lle_file.n_channels) < channel_30MeV
native_quality[idx] = 5
event_list = EventListWithLiveTime(
arrival_times=lat_lle_file.arrival_times - lat_lle_file.trigger_time,
energies=lat_lle_file.energies,
n_channels=lat_lle_file.n_channels,
live_time=lat_lle_file.livetime,
live_time_starts=lat_lle_file.livetime_start - lat_lle_file.trigger_time,
live_time_stops=lat_lle_file.livetime_stop - lat_lle_file.trigger_time,
start_time=lat_lle_file.tstart - lat_lle_file.trigger_time,
stop_time=lat_lle_file.tstop - lat_lle_file.trigger_time,
quality=native_quality,
first_channel=1,
# rsp_file=rsp_file,
instrument=lat_lle_file.instrument,
mission=lat_lle_file.mission,
verbose=verbose)
# pass to the super class
rsp = OGIPResponse(rsp_file)
return cls(name,
event_list,
response=rsp,
poly_order=poly_order,
unbinned=unbinned,
verbose=verbose,
restore_poly_fit=restore_background)
@classmethod
def from_phaII(cls):
        raise NotImplementedError('Reading from a generic PHAII file is not yet supported')
@classmethod
def from_polar(cls):
raise NotImplementedError('Reading of POLAR data is not yet supported')
|
bsd-3-clause
|
simiden/BangsimonStocks
|
stockPlot.py
|
2
|
3222
|
from stockInfo import stockInfo
import matplotlib.pyplot as plt
from datetime import date
from stockAnalysis import movingAverage
import matplotlib
def stockPlot(s,k,fromDate=None,toDate=None, MovingAvg = False, N = 20, Volume=False):
"""
# Use: stockPlot(s,k,fromDate,toDate)
# Pre: s is a stockinfo object, k is a string which describes an attribute of s
# fromDate and toDate are optional date objects, MovingAvg and Volume are boolean
# N is an Integer >= 0.
# Post: We have a plot of the attribute k of s vs time,
# where the possible attributes are, in order:
# Open, High, Low, Close, Adj Close, and Avg. Price
    # Moving Average over the period of the last N days is plotted if MovingAvg is true,
# If Volume is true: Below the plot we have a bar graph of the volume of s over time,
# where the bar is green if the volume is 'positive' (the day's closing price was higher than the opening price),
# and red if the volume is 'negative' (opening price higher than closing)
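    # Example (hypothetical ticker and dates; assumes stockInfo can fetch the data):
    #   goog = stockInfo("GOOG", date(2013, 1, 1), date(2013, 6, 1))
    #   fig = stockPlot(goog, 'Adj Close', MovingAvg=True, N=30, Volume=True)
    #   fig.savefig('goog.png')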
"""
if fromDate==None:
fromDate=s.fromDate
if toDate==None:
toDate=s.toDate
attrs={'Date':0, 'Open':1, 'High':2, 'Low':3, 'Close':4, 'Adj Close':5, 'Avg. Price':6}
infoList=s.listFromTo(fromDate,toDate)
date=lambda d: d[0]
dateList=map(date,infoList)
fig = plt.gcf()
ax = fig.add_subplot(1,1,1)
a = attrs[k]
if a == 6:
getData =lambda d: (d[1] + d[4])/2
else:
if a==5:
getData =lambda d: d[6]
else:
getData=lambda d:d[a]
dataList=map(getData,infoList)
if MovingAvg:
l=movingAverage(s, N, fromDate, toDate)
ax.plot(dateList,l,'r--',dateList,dataList,'b')
ax.set_ylabel(k + " (blue) and Moving average over " + str(N) + " days (red)")
else:
ax.plot(dateList, dataList)
ax.set_ylabel(k)
ax.set_xlabel("Dates")
if Volume:
# shift y-limits of the plot so that there is space at the bottom for the volume bar chart
pad = 0.25
yl = ax.get_ylim()
ax.set_ylim(yl[0]-(yl[1]-yl[0])*pad,yl[1])
vol=lambda d: d[5]
# create the second axis for the volume bar chart
ax2 = ax.twinx()
# set the position of ax2 so that it is short (y2=0.32) but otherwise the same size as ax
ax2.set_position(matplotlib.transforms.Bbox([[0.125,0.1],[0.9,0.32]]))
# make bar charts and color differently depending on up/down for the day
posList=[]
negList=[]
for i in infoList:
if i[1]-i[4]<0:
posList.append(i)
else: negList.append(i)
ax2.bar(map(date,posList),map(vol,posList),color='green',width=1,align='center')
ax2.bar(map(date,negList),map(vol,negList),color='red',width=1,align='center')
ax2.yaxis.set_label_position("right")
ax2.set_ylabel('Volume')
return fig
def smaPlot(s,N=20,fromDate=None,toDate=None):
return stockPlot(s,"Adj Close",fromDate,toDate,True,N)
if __name__ == "__main__":
Google = stockInfo("GOOG",date(2012,1,1),date.today())
    s = stockPlot(Google, 'Adj Close', None, None, False, 100, True)
|
gpl-3.0
|
hayj/WorkspaceManager
|
workspacemanager/pewinst.py
|
1
|
2381
|
# coding: utf-8
# https://stackoverflow.com/questions/4757178/how-do-you-set-your-pythonpath-in-an-already-created-virtualenv
"""
This script installs all requirements from the workspace into the st-venv virtualenv (pew)
"""
import os
import sh
try:
from utils import *
except:
from workspacemanager.utils import *
from pathlib import Path
def homeDir():
return str(Path.home())
# import sys
# print(sys.path)
# exit()
def installSublReqs():
venvName = "st-venv"
workspacePath = homeDir() + "/Workspace"
venvPath = homeDir() + "/.virtualenvs/" + venvName
projects = getAllProjects(workspacePath)
print("Installing all projects in the python path of " + venvName + "...")
for current in projects.keys():
installReqs(current, venvName=venvName)
# thePath = current + "/requirements.txt"
# if isFile(thePath):
# print(fileToStr(thePath))
# try:
# sh.pew("in", venvName, "pip", "install", "-r", thePath)
# except Exception as e:
# print(str(e))
# print("Installing all requirements in " + thePath)
# print(script)
# strToFile(script, pythonpathPath)
"""
pew in st-venv pip install https://github.com/misja/python-boilerpipe/zipball/master#egg=python-boilerpipe
pew in st-venv pip install newspaper3k
pew in st-venv pip install news-please
pew in st-venv pip uninstall -y pymongo
pew in st-venv pip uninstall -y bson
pew in st-venv pip install pymongo
pew in st-venv pip install --no-binary pandas -I pandas
"""
if __name__ == '__main__':
installSublReqs()
"""
packageList = []
for root, subdirs, files in os.walk(workspacePath):
if "__init__.py" in files:
packageList.append(root)
toDelete = set()
for i in range(len(packageList)):
for u in range(len(packageList)):
if u != i:
first = packageList[i]
second = packageList[u]
if second.startswith(first):
toDelete.add(u)
newPackageList = []
for u in range(len(packageList)):
if u not in toDelete:
newPackageList.append(packageList[u])
packageList = newPackageList
# We delete all "/build/lib" and we get parent dirs:
newPackageList = []
for current in packageList:
if "/build/lib" not in current:
newPackageList.append(getParentDir(current))
packageList = newPackageList
packageList = list(set(packageList))
script = ""
newLine = "\n"
for current in packageList:
script += 'export PYTHONPATH="$PYTHONPATH:' + current + '"' + newLine
"""
|
mit
|
zorroblue/scikit-learn
|
sklearn/neighbors/tests/test_quad_tree.py
|
28
|
3789
|
import pickle
import numpy as np
from sklearn.neighbors.quad_tree import _QuadTree
from sklearn.utils import check_random_state
def test_quadtree_boundary_computation():
# Introduce a point into a quad tree with boundaries not easy to compute.
Xs = []
# check a random case
Xs.append(np.array([[-1, 1], [-4, -1]], dtype=np.float32))
# check the case where only 0 are inserted
Xs.append(np.array([[0, 0], [0, 0]], dtype=np.float32))
# check the case where only negative are inserted
Xs.append(np.array([[-1, -2], [-4, 0]], dtype=np.float32))
# check the case where only small numbers are inserted
Xs.append(np.array([[-1e-6, 1e-6], [-4e-6, -1e-6]], dtype=np.float32))
for X in Xs:
tree = _QuadTree(n_dimensions=2, verbose=0)
tree.build_tree(X)
tree._check_coherence()
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
# Test will hang if it doesn't complete.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
# check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
tree = _QuadTree(n_dimensions=2, verbose=0)
tree.build_tree(X)
tree._check_coherence()
def test_quad_tree_pickle():
rng = check_random_state(0)
for n_dimensions in (2, 3):
X = rng.random_sample((10, n_dimensions))
tree = _QuadTree(n_dimensions=n_dimensions, verbose=0)
tree.build_tree(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(tree, protocol=protocol)
bt2 = pickle.loads(s)
for x in X:
cell_x_tree = tree.get_cell(x)
cell_x_bt2 = bt2.get_cell(x)
assert cell_x_tree == cell_x_bt2
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_qt_insert_duplicate():
rng = check_random_state(0)
def check_insert_duplicate(n_dimensions=2):
X = rng.random_sample((10, n_dimensions))
Xd = np.r_[X, X[:5]]
tree = _QuadTree(n_dimensions=n_dimensions, verbose=0)
tree.build_tree(Xd)
cumulative_size = tree.cumulative_size
leafs = tree.leafs
# Assert that the first 5 are indeed duplicated and that the next
        # ones are single-point leaves
for i, x in enumerate(X):
cell_id = tree.get_cell(x)
assert leafs[cell_id]
assert cumulative_size[cell_id] == 1 + (i < 5)
for n_dimensions in (2, 3):
yield check_insert_duplicate, n_dimensions
def test_summarize():
_QuadTree.test_summarize()
|
bsd-3-clause
|
philipan/paparazzi
|
sw/airborne/test/ahrs/ahrs_utils.py
|
86
|
4923
|
#! /usr/bin/env python
# Copyright (C) 2011 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function
import subprocess
import numpy as np
import matplotlib.pyplot as plt
def run_simulation(ahrs_type, build_opt, traj_nb):
print("\nBuilding ahrs")
args = ["make", "clean", "run_ahrs_on_synth", "AHRS_TYPE=AHRS_TYPE_" + ahrs_type] + build_opt
#print(args)
p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
outputlines = p.stdout.readlines()
p.wait()
for i in outputlines:
print(" # " + i, end=' ')
print()
print("Running simulation")
print(" using traj " + str(traj_nb))
p = subprocess.Popen(args=["./run_ahrs_on_synth", str(traj_nb)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=False)
outputlines = p.stdout.readlines()
p.wait()
# for i in outputlines:
# print(" "+i, end=' ')
# print("\n")
ahrs_data_type = [('time', 'float32'),
('phi_true', 'float32'), ('theta_true', 'float32'), ('psi_true', 'float32'),
('p_true', 'float32'), ('q_true', 'float32'), ('r_true', 'float32'),
('bp_true', 'float32'), ('bq_true', 'float32'), ('br_true', 'float32'),
('phi_ahrs', 'float32'), ('theta_ahrs', 'float32'), ('psi_ahrs', 'float32'),
('p_ahrs', 'float32'), ('q_ahrs', 'float32'), ('r_ahrs', 'float32'),
('bp_ahrs', 'float32'), ('bq_ahrs', 'float32'), ('br_ahrs', 'float32')]
mydescr = np.dtype(ahrs_data_type)
data = [[] for dummy in xrange(len(mydescr))]
# import code; code.interact(local=locals())
for line in outputlines:
if line.startswith("#"):
print(" " + line, end=' ')
else:
fields = line.strip().split(' ')
#print(fields)
for i, number in enumerate(fields):
data[i].append(number)
print()
for i in xrange(len(mydescr)):
data[i] = np.cast[mydescr[i]](data[i])
return np.rec.array(data, dtype=mydescr)
def plot_simulation_results(plot_true_state, lsty, label, sim_res):
print("Plotting Results")
# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_ahrs, lsty, label=label)
    plt.ylabel('degrees')
plt.title('phi')
plt.legend()
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_ahrs, lsty)
plt.title('theta')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_ahrs, lsty)
plt.title('psi')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_ahrs, lsty)
plt.ylabel('degres/s')
plt.title('p')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_ahrs, lsty)
plt.title('q')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_ahrs, lsty)
plt.title('r')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_ahrs, lsty)
plt.ylabel('degres/s')
plt.xlabel('time in s')
plt.title('bp')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_ahrs, lsty)
plt.xlabel('time in s')
plt.title('bq')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_ahrs, lsty)
plt.xlabel('time in s')
plt.title('br')
if plot_true_state:
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_true, 'r--')
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_true, 'r--')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_true, 'r--')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_true, 'r--')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_true, 'r--')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_true, 'r--')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_true, 'r--')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_true, 'r--')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_true, 'r--')
def show_plot():
plt.show()
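# --- Editorial sketch, not part of the original module ---
# A minimal illustration of how these helpers could be chained: build and run
# one synthetic simulation, overlay the estimate on the true state, and show
# the figure. The AHRS type string, build options and trajectory number are
# hypothetical placeholders; valid values depend on the local Paparazzi
# checkout and its run_ahrs_on_synth target.
if __name__ == "__main__":
    sim_res = run_simulation("ICQ", [], 0)  # hypothetical arguments
    plot_simulation_results(True, 'b-', 'ICQ', sim_res)
    show_plot()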
|
gpl-2.0
|
kcavagnolo/astroML
|
book_figures/appendix/fig_neural_network.py
|
3
|
3090
|
"""
Neural Network Diagram
----------------------
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
fig = plt.figure(figsize=(5, 3.75), facecolor='w')
ax = fig.add_axes([0, 0, 1, 1],
xticks=[], yticks=[])
plt.box(False)
circ = plt.Circle((1, 1), 2)
radius = 0.3
arrow_kwargs = dict(head_width=0.05, fc='black')
# function to draw arrows
def draw_connecting_arrow(ax, circ1, rad1, circ2, rad2):
theta = np.arctan2(circ2[1] - circ1[1],
circ2[0] - circ1[0])
starting_point = (circ1[0] + rad1 * np.cos(theta),
circ1[1] + rad1 * np.sin(theta))
length = (circ2[0] - circ1[0] - (rad1 + 1.4 * rad2) * np.cos(theta),
circ2[1] - circ1[1] - (rad1 + 1.4 * rad2) * np.sin(theta))
ax.arrow(starting_point[0], starting_point[1],
length[0], length[1], **arrow_kwargs)
# function to draw circles
def draw_circle(ax, center, radius):
circ = plt.Circle(center, radius, fc='none', lw=2)
ax.add_patch(circ)
x1 = -2
x2 = 0
x3 = 2
y3 = 0
#------------------------------------------------------------
# draw circles
for i, y1 in enumerate(np.linspace(1.5, -1.5, 4)):
draw_circle(ax, (x1, y1), radius)
ax.text(x1 - 0.9, y1, 'Input #%i' % (i + 1),
ha='right', va='center', fontsize=16)
draw_connecting_arrow(ax, (x1 - 0.9, y1), 0.1, (x1, y1), radius)
for y2 in np.linspace(-2, 2, 5):
draw_circle(ax, (x2, y2), radius)
draw_circle(ax, (x3, y3), radius)
ax.text(x3 + 0.8, y3, 'Output', ha='left', va='center', fontsize=16)
draw_connecting_arrow(ax, (x3, y3), radius, (x3 + 0.8, y3), 0.1)
#------------------------------------------------------------
# draw connecting arrows
for y1 in np.linspace(-1.5, 1.5, 4):
for y2 in np.linspace(-2, 2, 5):
draw_connecting_arrow(ax, (x1, y1), radius, (x2, y2), radius)
for y2 in np.linspace(-2, 2, 5):
draw_connecting_arrow(ax, (x2, y2), radius, (x3, y3), radius)
#------------------------------------------------------------
# Add text labels
plt.text(x1, 2.7, "Input\nLayer", ha='center', va='top', fontsize=16)
plt.text(x2, 2.7, "Hidden Layer", ha='center', va='top', fontsize=16)
plt.text(x3, 2.7, "Output\nLayer", ha='center', va='top', fontsize=16)
ax.set_aspect('equal')
plt.xlim(-4, 4)
plt.ylim(-3, 3)
plt.show()
|
bsd-2-clause
|
jseabold/scikit-learn
|
examples/neighbors/plot_classification.py
|
287
|
1790
|
"""
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
|
bsd-3-clause
|
mojoboss/scikit-learn
|
examples/exercises/plot_cv_digits.py
|
232
|
1206
|
"""
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
|
bsd-3-clause
|
google/jax-cfd
|
jax_cfd/data/visualization.py
|
1
|
4950
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visualization utilities."""
from typing import Any, BinaryIO, Callable, Optional, List, Tuple, Union
from jax_cfd.base import grids
import matplotlib as mpl
import matplotlib.cm as cm
import numpy as np
import PIL.Image as Image
import seaborn as sns
NormFn = Callable[[grids.Array, int], mpl.colors.Normalize]
def quantile_normalize_fn(
image_data: grids.Array,
image_num: int,
quantile: float = 0.999
) -> mpl.colors.Normalize:
"""Returns `mpl.colors.Normalize` object that range defined by data quantile.
Args:
image_data: data for which `Normalize` object is produced.
image_num: number of frame in the series. Not used.
quantile: quantile that should be included in the range.
Returns:
    `mpl.colors.Normalize` covering the range of values that includes the given
    quantile of `image_data` values.
"""
del image_num # not used by `quantile_normalize_fn`.
max_to_include = np.quantile(abs(image_data), quantile)
norm = mpl.colors.Normalize(vmin=-max_to_include, vmax=max_to_include)
return norm
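# --- Editorial sketch, not part of the original module ---
# Any callable matching the NormFn signature above can be passed as
# `compute_norm_fn` to `trajectory_to_images` below. For example, a fixed-range
# normalizer that ignores both the data and the frame index:
def fixed_normalize_fn(
    image_data: grids.Array,
    image_num: int,
    vmax: float = 1.0,
) -> mpl.colors.Normalize:
  """Example NormFn returning a constant [-vmax, vmax] range."""
  del image_data, image_num  # the range is fixed, so both are unused.
  return mpl.colors.Normalize(vmin=-vmax, vmax=vmax)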
def resize_image(
image: Image.Image,
longest_side: int,
resample: int = Image.NEAREST,
) -> Image.Image:
"""Resize an image, preserving its aspect ratio."""
resize_factor = longest_side / max(image.size)
new_size = tuple(round(s * resize_factor) for s in image.size)
return image.resize(new_size, resample)
def trajectory_to_images(
trajectory: grids.Array,
compute_norm_fn: NormFn = quantile_normalize_fn,
cmap: mpl.colors.ListedColormap = sns.cm.icefire,
longest_side: Optional[int] = None,
) -> List[Image.Image]:
"""Converts scalar trajectory with leading time axis into a list of images."""
images = []
for i, image_data in enumerate(trajectory):
norm = compute_norm_fn(image_data, i)
mappable = cm.ScalarMappable(norm=norm, cmap=cmap)
img = Image.fromarray(mappable.to_rgba(image_data, bytes=True))
if longest_side is not None:
img = resize_image(img, longest_side)
images.append(img)
return images
# TODO(dkochkov) consider generalizing this to a general facet.
def horizontal_facet(
separate_images: List[List[Image.Image]],
relative_separation_width: float,
separation_rgb: Tuple[int, int, int] = (255, 255, 255)
) -> List[Image.Image]:
"""Stitches separate images into a single one with a separation strip.
Args:
separate_images: lists of images each representing time series. All images
must have the same size.
relative_separation_width: width of the separation defined as a fraction of
a separate image.
separation_rgb: rgb color code of the separation strip to add between
adjacent images.
Returns:
list of merged images that contain images passed as `separate_images` with
a separating strip.
"""
images = []
for frames in zip(*separate_images):
images_to_combine = len(frames)
separation_width = round(frames[0].width * relative_separation_width)
image_height = frames[0].height
image_width = (frames[0].width * images_to_combine +
separation_width * (images_to_combine - 1))
full_im = Image.new('RGB', (image_width, image_height))
sep_im = Image.new('RGB', (separation_width, image_height), separation_rgb)
width_offset = 0
height_offset = 0
for frame in frames:
full_im.paste(frame, (width_offset, height_offset))
width_offset += frame.width
if width_offset < full_im.width:
full_im.paste(sep_im, (width_offset, height_offset))
width_offset += sep_im.width
images.append(full_im)
return images
def save_movie(
images: List[Image.Image],
output_path: Union[str, BinaryIO],
duration: float = 150.,
loop: int = 0,
**kwargs: Any
):
"""Saves `images` as a movie of duration `duration` to `output_path`.
Args:
images: list of images representing time series that will be saved as movie.
output_path: file handle or cns path to where save the movie.
    duration: display duration of each frame in milliseconds.
loop: number of times to loop the movie. 0 interpreted as indefinite.
**kwargs: additional keyword arguments to be passed to `Image.save`.
"""
images[0].save(output_path, save_all=True, append_images=images[1:],
duration=duration, loop=loop, **kwargs)
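# --- Editorial sketch, not part of the original module ---
# A rough end-to-end illustration: convert two synthetic scalar trajectories
# into frames, stitch them side by side, and write an animated GIF. The random
# data and the output filename are placeholders.
if __name__ == '__main__':
  rng = np.random.RandomState(0)
  traj_a = rng.randn(20, 64, 64)  # (time, x, y) synthetic scalar fields
  traj_b = rng.randn(20, 64, 64)
  frames_a = trajectory_to_images(traj_a, longest_side=256)
  frames_b = trajectory_to_images(traj_b, longest_side=256)
  frames = horizontal_facet([frames_a, frames_b],
                            relative_separation_width=0.05)
  save_movie(frames, 'comparison.gif', duration=100.)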
|
apache-2.0
|
rrohan/scikit-learn
|
sklearn/preprocessing/label.py
|
137
|
27165
|
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
        if len(diff):
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
            which represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
        The output of transform is sometimes referred to as the 1-of-K coding
        scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
            contain 0 and 1, which represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows one to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
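# --- Editorial sketch, not part of the original module ---
# A small round-trip illustration of the binarizers defined above.
if __name__ == '__main__':
    lb = LabelBinarizer()
    Y = lb.fit_transform([1, 2, 6, 4, 2])
    print(lb.inverse_transform(Y))    # expected: [1 2 6 4 2]
    mlb = MultiLabelBinarizer()
    Yt = mlb.fit_transform([(1, 2), (3,)])
    print(mlb.inverse_transform(Yt))  # expected: [(1, 2), (3,)]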
|
bsd-3-clause
|
mixturemodel-flow/tensorflow
|
tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py
|
137
|
2219
|
# encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
|
apache-2.0
|