repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
mlyundin/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for each parameter set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
mfjb/scikit-learn | sklearn/neural_network/rbm.py | 206 | 12292 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
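# Bernoulli sampling: each hidden unit is switched on with probability p,
# implemented by thresholding a uniform draw against the mean-field value.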
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
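# np.floor(h_neg, h_neg) floors in place: entries just set to 1.0 stay 1 and
# the remaining mean-field values drop to 0, so the persistent chain state is
# a binary sample of the hidden units.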
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
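# Corrupting a single feature gives one term of the log pseudo-likelihood;
# multiplying by n_features extrapolates to the sum over all features.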
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
| bsd-3-clause |
OshynSong/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
pravsripad/mne-python | tutorials/time-freq/plot_sensors_time_frequency.py | 10 | 8158 | """
.. _tut-sensors-time-freq:
============================================
Frequency and time-frequency sensor analysis
============================================
The objective is to show you how to explore the spectral content
of your data (frequency and time-frequency). Here we'll work on Epochs.
We will use this dataset: :ref:`somato-dataset`. It contains so-called event
related synchronizations (ERS) / desynchronizations (ERD) in the beta band.
""" # noqa: E501
# Authors: Alexandre Gramfort <[email protected]>
# Stefan Appelhoff <[email protected]>
# Richard Höchenberger <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet, psd_multitaper, psd_welch
from mne.datasets import somato
###############################################################################
# Set parameters
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
# Construct Epochs
event_id, tmin, tmax = 1, -1., 3.
baseline = (None, 0)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6),
preload=True)
epochs.resample(200., npad='auto') # resample to reduce computation time
###############################################################################
# Frequency analysis
# ------------------
#
# We start by exploring the frequency content of our epochs.
###############################################################################
# Let's first check out all channel types by averaging across epochs.
epochs.plot_psd(fmin=2., fmax=40., average=True, spatial_colors=False)
###############################################################################
# Now let's take a look at the spatial distributions of the PSD.
epochs.plot_psd_topomap(ch_type='grad', normalize=True)
###############################################################################
# Alternatively, you can also create PSDs from Epochs objects with functions
# that start with ``psd_`` such as
# :func:`mne.time_frequency.psd_multitaper` and
# :func:`mne.time_frequency.psd_welch`.
f, ax = plt.subplots()
psds, freqs = psd_multitaper(epochs, fmin=2, fmax=40, n_jobs=1)
psds = 10. * np.log10(psds)
psds_mean = psds.mean(0).mean(0)
psds_std = psds.mean(0).std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD (gradiometers)', xlabel='Frequency (Hz)',
ylabel='Power Spectral Density (dB)')
plt.show()
###############################################################################
# Notably, :func:`mne.time_frequency.psd_welch` supports the keyword argument
# ``average``, which specifies how to estimate the PSD based on the individual
# windowed segments. The default is ``average='mean'``, which simply calculates
# the arithmetic mean across segments. Specifying ``average='median'``, in
# contrast, returns the PSD based on the median of the segments (corrected for
# bias relative to the mean), which is a more robust measure.
# Estimate PSDs based on "mean" and "median" averaging for comparison.
kwargs = dict(fmin=2, fmax=40, n_jobs=1)
psds_welch_mean, freqs_mean = psd_welch(epochs, average='mean', **kwargs)
psds_welch_median, freqs_median = psd_welch(epochs, average='median', **kwargs)
# Convert power to dB scale.
psds_welch_mean = 10 * np.log10(psds_welch_mean)
psds_welch_median = 10 * np.log10(psds_welch_median)
# We will only plot the PSD for a single sensor in the first epoch.
ch_name = 'MEG 0122'
ch_idx = epochs.info['ch_names'].index(ch_name)
epo_idx = 0
_, ax = plt.subplots()
ax.plot(freqs_mean, psds_welch_mean[epo_idx, ch_idx, :], color='k',
ls='-', label='mean of segments')
ax.plot(freqs_median, psds_welch_median[epo_idx, ch_idx, :], color='k',
ls='--', label='median of segments')
ax.set(title='Welch PSD ({}, Epoch {})'.format(ch_name, epo_idx),
xlabel='Frequency (Hz)', ylabel='Power Spectral Density (dB)')
ax.legend(loc='upper right')
plt.show()
###############################################################################
# Lastly, we can also retrieve the unaggregated segments by passing
# ``average=None`` to :func:`mne.time_frequency.psd_welch`. The dimensions of
# the returned array are ``(n_epochs, n_sensors, n_freqs, n_segments)``.
psds_welch_unagg, freqs_unagg = psd_welch(epochs, average=None, **kwargs)
print(psds_welch_unagg.shape)
###############################################################################
# .. _inter-trial-coherence:
#
# Time-frequency analysis: power and inter-trial coherence
# --------------------------------------------------------
#
# We now compute time-frequency representations (TFRs) from our Epochs.
# We'll look at power and inter-trial coherence (ITC).
#
# To do this we'll use the function :func:`mne.time_frequency.tfr_morlet`
# but you can also use :func:`mne.time_frequency.tfr_multitaper`
# or :func:`mne.time_frequency.tfr_stockwell`.
#
# .. note::
# The ``decim`` parameter reduces the sampling rate of the time-frequency
# decomposition by the defined factor. This is usually done to reduce
# memory usage. For more information refer to the documentation of
# :func:`mne.time_frequency.tfr_morlet`.
#
# define frequencies of interest (log-spaced)
freqs = np.logspace(*np.log10([6, 35]), num=8)
n_cycles = freqs / 2. # different number of cycle per frequency
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, decim=3, n_jobs=1)
###############################################################################
# Inspect power
# -------------
#
# .. note::
# The generated figures are interactive. In the topo you can click
# on an image to visualize the data for one sensor.
# You can also select a portion in the time-frequency plane to
# obtain a topomap for a certain time-frequency region.
power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
power.plot([82], baseline=(-0.5, 0), mode='logratio', title=power.ch_names[82])
fig, axis = plt.subplots(1, 2, figsize=(7, 4))
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
baseline=(-0.5, 0), mode='logratio', axes=axis[0],
title='Alpha', show=False)
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,
baseline=(-0.5, 0), mode='logratio', axes=axis[1],
title='Beta', show=False)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Joint Plot
# ----------
# You can also create a joint plot showing both the aggregated TFR
# across channels and topomaps at specific times and frequencies to obtain
# a quick overview regarding oscillatory effects across time and space.
power.plot_joint(baseline=(-0.5, 0), mode='mean', tmin=-.5, tmax=2,
timefreqs=[(.5, 10), (1.3, 8)])
###############################################################################
# Inspect ITC
# -----------
itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')
###############################################################################
# .. note::
# Baseline correction can be applied to power or done in plots.
# To illustrate baseline correction in plots, the call below is left
# commented out: power.apply_baseline(baseline=(-0.5, 0), mode='logratio')
#
# Exercise
# --------
#
# - Visualize the inter-trial coherence values as topomaps as done with
# power.
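#
# One possible sketch (``itc`` is an ``AverageTFR`` like ``power``, so the same
# plotting API applies; the time/frequency values below are illustrative only):
#
# itc.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
#                  title='Alpha ITC')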
| bsd-3-clause |
tensorflow/models | research/object_detection/metrics/oid_vrd_challenge_evaluation.py | 3 | 5841 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs evaluation using OpenImages groundtruth and predictions.
Example usage:
python \
models/research/object_detection/metrics/oid_vrd_challenge_evaluation.py \
--input_annotations_vrd=/path/to/input/annotations-human-bbox.csv \
--input_annotations_labels=/path/to/input/annotations-label.csv \
--input_class_labelmap=/path/to/input/class_labelmap.pbtxt \
--input_relationship_labelmap=/path/to/input/relationship_labelmap.pbtxt \
--input_predictions=/path/to/input/predictions.csv \
--output_metrics=/path/to/output/metric.csv \
CSVs with bounding box annotations and image labels (including the image URLs)
can be downloaded from the Open Images Challenge website:
https://storage.googleapis.com/openimages/web/challenge.html
The format of the input csv and the metrics itself are described on the
challenge website.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import pandas as pd
from google.protobuf import text_format
from object_detection.metrics import io_utils
from object_detection.metrics import oid_vrd_challenge_evaluation_utils as utils
from object_detection.protos import string_int_label_map_pb2
from object_detection.utils import vrd_evaluation
def _load_labelmap(labelmap_path):
"""Loads labelmap from the labelmap path.
Args:
labelmap_path: Path to the labelmap.
Returns:
A dictionary mapping class name to class numerical id.
"""
label_map = string_int_label_map_pb2.StringIntLabelMap()
with open(labelmap_path, 'r') as fid:
label_map_string = fid.read()
text_format.Merge(label_map_string, label_map)
labelmap_dict = {}
for item in label_map.item:
labelmap_dict[item.name] = item.id
return labelmap_dict
def _swap_labelmap_dict(labelmap_dict):
"""Swaps keys and labels in labelmap.
Args:
labelmap_dict: Input dictionary.
Returns:
A dictionary mapping class numerical id to class name.
"""
return dict((v, k) for k, v in labelmap_dict.items())
def main(parsed_args):
all_box_annotations = pd.read_csv(parsed_args.input_annotations_vrd)
all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels)
all_annotations = pd.concat([all_box_annotations, all_label_annotations])
class_label_map = _load_labelmap(parsed_args.input_class_labelmap)
relationship_label_map = _load_labelmap(
parsed_args.input_relationship_labelmap)
relation_evaluator = vrd_evaluation.VRDRelationDetectionEvaluator()
phrase_evaluator = vrd_evaluation.VRDPhraseDetectionEvaluator()
for _, groundtruth in enumerate(all_annotations.groupby('ImageID')):
image_id, image_groundtruth = groundtruth
groundtruth_dictionary = utils.build_groundtruth_vrd_dictionary(
image_groundtruth, class_label_map, relationship_label_map)
relation_evaluator.add_single_ground_truth_image_info(
image_id, groundtruth_dictionary)
phrase_evaluator.add_single_ground_truth_image_info(image_id,
groundtruth_dictionary)
all_predictions = pd.read_csv(parsed_args.input_predictions)
for _, prediction_data in enumerate(all_predictions.groupby('ImageID')):
image_id, image_predictions = prediction_data
prediction_dictionary = utils.build_predictions_vrd_dictionary(
image_predictions, class_label_map, relationship_label_map)
relation_evaluator.add_single_detected_image_info(image_id,
prediction_dictionary)
phrase_evaluator.add_single_detected_image_info(image_id,
prediction_dictionary)
relation_metrics = relation_evaluator.evaluate(
relationships=_swap_labelmap_dict(relationship_label_map))
phrase_metrics = phrase_evaluator.evaluate(
relationships=_swap_labelmap_dict(relationship_label_map))
with open(parsed_args.output_metrics, 'w') as fid:
io_utils.write_csv(fid, relation_metrics)
io_utils.write_csv(fid, phrase_metrics)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=
'Evaluate Open Images Visual Relationship Detection predictions.')
parser.add_argument(
'--input_annotations_vrd',
required=True,
help='File with groundtruth vrd annotations.')
parser.add_argument(
'--input_annotations_labels',
required=True,
help='File with groundtruth labels annotations')
parser.add_argument(
'--input_predictions',
required=True,
help="""File with detection predictions; NOTE: no postprocessing is
applied in the evaluation script.""")
parser.add_argument(
'--input_class_labelmap',
required=True,
help="""OpenImages Challenge labelmap; note: it is expected to include
attributes.""")
parser.add_argument(
'--input_relationship_labelmap',
required=True,
help="""OpenImages Challenge relationship labelmap.""")
parser.add_argument(
'--output_metrics', required=True, help='Output file with csv metrics')
args = parser.parse_args()
main(args)
| apache-2.0 |
larsmans/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display, first, what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
by setting n_init to only 1 (the default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
travisfcollins/gnuradio | gr-filter/examples/chirp_channelize.py | 58 | 7169 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 200000 # number of samples to use
self._fs = 9000 # initial sampling rate
self._M = 9 # Number of channels to channelize
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._fs, 500, 20,
attenuation_dB=10,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
repeated = True
if(repeated):
self.vco_input = analog.sig_source_f(self._fs, analog.GR_SIN_WAVE, 0.25, 110)
else:
amp = 100
data = scipy.arange(0, amp, amp/float(self._N))
self.vco_input = blocks.vector_source_f(data, False)
# Build a VCO controlled by either the sinusoid or single chirp tone
# Then convert this to a complex signal
self.vco = blocks.vco_f(self._fs, 225, 1)
self.f2c = blocks.float_to_complex()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.vco_input, self.vco, self.f2c)
self.connect(self.f2c, self.head, self.pfb)
self.connect(self.f2c, self.snk_i)
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
fig3 = pylab.figure(4, figsize=(16,9), facecolor="w")
Ns = 650
Ne = 20000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs / tb._M
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = freq
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
sp3 = fig3.add_subplot(1,1,1)
p3 = sp3.plot(t_o, x_o.real)
sp3.set_xlim([min(t_o), max(t_o)+1])
sp3.set_ylim([-2, 2])
sp3.set_title("All Channels")
sp3.set_xlabel("Time (s)")
sp3.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
LichAmnesia/MBSCM | Sequence/generate_graph_file.py | 1 | 8375 | # -*- coding: utf-8 -*-
# @Author: LichAmnesia
# @Date: 2016-11-26 13:20:29
# @Last Modified by: Lich_Amnesia
# @Last Modified time: 2016-12-08 13:03:04
# This script processes the comments and generates a comments file for each day.
import datetime
import json
import pandas as pd
import collections
import random
# generate a graph file
def read_label_write_label_file():
labels_data = pd.read_csv('moderators_subreddit.csv', delimiter='\t')
moderators_map = {}
subreddit_map = {}
for moderator in labels_data['moderators']:
if moderator not in moderators_map:
moderators_map[moderator] = len(moderators_map) + 1
for subreddit in labels_data['subreddit']:
if subreddit not in subreddit_map:
subreddit_map[subreddit] = len(subreddit_map) + 1
return labels_data, moderators_map, subreddit_map
def read_edge_date(filename='comments_2016-09-01.csv'):
edge_data = pd.read_csv(filename, delimiter='\t')
edge_map = collections.defaultdict(list)
for line_num in range(len(edge_data)):
line = edge_data.loc[line_num]
# print(line['author'])
edge_map[line['link_id']].append(moderators_map[line['author']])
summary = 0
edge_filename = filename.split('.')[0]
label_filename = edge_filename + '_metadata' + '.txt'
edge_filename = edge_filename + '.txt'
edge_file = open(edge_filename, 'w')
label_file = open(label_filename, 'w')
# The node in the edge_list
has_set = set([])
for i in edge_map:
if len(edge_map[i]) > 1:
for j in range(len(edge_map[i]) - 1):
if edge_map[i][j + 1] != edge_map[i][j]:
has_set.add(edge_map[i][j + 1])
has_set.add(edge_map[i][j])
edge_file.write(str(edge_map[i][j + 1]) + ' ' + str(edge_map[i][j]) + '\n')
edge_file.close()
print(has_set)
for line_num in range(len(labels_data)):
line = labels_data.loc[line_num]
if moderators_map[line['moderators']] in has_set:
label_file.write(str(moderators_map[line['moderators']]) + ' ' + str(subreddit_map[line['subreddit']]) + '\n')
label_file.close()
# Only handle the two subreddits; each link_id is treated as one node
def read_edge_subreddit(filename='comments_2016-09-01.csv'):
# labels_data = pd.read_csv('moderators_subreddit.csv', delimiter='\t')
# moderators_map = {}
# subreddit_map = {}
# for moderator in labels_data['moderators']:
# if moderator not in moderators_map:
# moderators_map[moderator] = len(moderators_map) + 1
# for subreddit in labels_data['subreddit']:
# if subreddit not in subreddit_map:
# subreddit_map[subreddit] = len(subreddit_map) + 1
# return labels_data, moderators_map, subreddit_map
# Read the data, then record the sequence of link_ids each author commented on.
# Comments by the same author on different link_ids form an edge.
edge_data = pd.read_csv(filename, delimiter='\t')
edge_map = collections.defaultdict(list)
author_map = collections.defaultdict(list)
link_id_map = {}
node_label_map = {}
for line_num in range(len(edge_data)):
line = edge_data.loc[line_num]
# print(line['author'])
if line['link_id'] not in link_id_map:
link_id_map[line['link_id']] = len(link_id_map) + 1
author_map[line['author']].append(link_id_map[line['link_id']])
if line['subreddit'] not in node_label_map:
node_label_map[line['subreddit']] = len(node_label_map) + 1
print(node_label_map)
summary = 0
edge_filename = filename.split('.')[0]
label_filename = edge_filename + '_metadata' + '.txt'
edge_filename = edge_filename + '.txt'
edge_file = open(edge_filename, 'w')
label_file = open(label_filename, 'w')
# The node in the edge_list
has_set = set([])
for i in author_map:
if len(author_map[i]) > 1:
for j in range(len(author_map[i]) - 1):
if author_map[i][j + 1] != author_map[i][j]:
has_set.add(author_map[i][j + 1])
has_set.add(author_map[i][j])
edge_file.write(str(author_map[i][j]) + ' ' + str(author_map[i][j + 1]) + '\n')
edge_file.close()
print(has_set, len(has_set))
has_wirte = set([])
for line_num in range(len(edge_data)):
line = edge_data.loc[line_num]
node = link_id_map[line['link_id']]
if node in has_set and node not in has_wirte:
has_wirte.add(node)
label_file.write(str(node) + ' ' + str(node_label_map[line['subreddit']]) + '\n')
label_file.close()
# Only handle the two subreddits; each link_id is treated as one node
def read_edge_subreddit_multi(filename='comments_2016-09-01.csv'):
# labels_data = pd.read_csv('moderators_subreddit.csv', delimiter='\t')
# moderators_map = {}
# subreddit_map = {}
# for moderator in labels_data['moderators']:
# if moderator not in moderators_map:
# moderators_map[moderator] = len(moderators_map) + 1
# for subreddit in labels_data['subreddit']:
# if subreddit not in subreddit_map:
# subreddit_map[subreddit] = len(subreddit_map) + 1
# return labels_data, moderators_map, subreddit_map
# Read the data, then record the sequence of link_ids each author commented on.
# Comments by the same author on different link_ids form an edge.
edge_data = pd.read_csv(filename, delimiter='\t')
edge_map = collections.defaultdict(list)
author_map = collections.defaultdict(list)
link_id_map = {}
node_label_map = {}
for line_num in range(len(edge_data)):
line = edge_data.loc[line_num]
if line['subreddit'] == 'Music':
continue
# print(line['author'])
if line['link_id'] not in link_id_map:
link_id_map[line['link_id']] = len(link_id_map) + 1
author_map[line['author']].append(link_id_map[line['link_id']])
if line['subreddit'] not in node_label_map:
node_label_map[line['subreddit']] = len(node_label_map) + 1
print(node_label_map)
summary = 0
edge_filename = filename.split('.')[0]
label_filename = edge_filename + '_metadata' + '.txt'
edge_filename = edge_filename + '.txt'
edge_file = open(edge_filename, 'w')
label_file = open(label_filename, 'w')
# The node in the edge_list
has_set = set([])
edge_set = set([])
edge_map = {}
for i in author_map:
if len(author_map[i]) > 1:
for j in range(len(author_map[i]) - 1):
has_set.add(author_map[i][j + 1])
has_set.add(author_map[i][j])
if (author_map[i][j], author_map[i][j + 1]) not in edge_map:
edge_map[(author_map[i][j], author_map[i][j + 1])] = 1
else:
edge_map[(author_map[i][j], author_map[i][j + 1])] += 1
# for k in range(j + 1, len(author_map[i])):
# if random.randint(1, 1000) >= 5:
# continue
# if author_map[i][j] != author_map[i][k] and (author_map[i][j], author_map[i][k]) not in edge_set:
# has_set.add(author_map[i][j])
# has_set.add(author_map[i][k])
# edge_set.add((author_map[i][j], author_map[i][k]))
# edge_file.write(str(author_map[i][j]) + ' ' + str(author_map[i][k]) + '\n')
for key in edge_map:
a, b = key
print(key)
if edge_map[key] >= 2:
print(key)
edge_file.write(str(a) + ' ' + str(b) + ' ' + str(edge_map[key]) + '\n')
edge_file.close()
print(has_set, len(has_set))
# has_wirte = set([])
# for line_num in range(len(edge_data)):
# line = edge_data.loc[line_num]
# node = link_id_map[line['link_id']]
# if node in has_set and node not in has_wirte:
# has_wirte.add(node)
# label_file.write(str(node) + ' ' + str(node_label_map[line['subreddit']]) + '\n')
# label_file.close()
# labels_data, moderators_map, subreddit_map = read_label_write_label_file()
def main():
# read_edge_date()
read_edge_subreddit_multi(filename='comments_subreddit_Music_hillaryclinton.csv')
if __name__ == '__main__':
main() | mit |
joshloyal/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 46 | 7057 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater, assert_true, raises
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_batch_size():
# TypeError when using batch_size on Python 3, see
# https://github.com/scikit-learn/scikit-learn/issues/7329 for more
# details
gp = GaussianProcess()
gp.fit(X, y)
gp.predict(X, batch_size=1)
gp.predict(X, batch_size=1, eval_MSE=True)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
jakevdp/scipy | scipy/cluster/vq.py | 2 | 29338 | """
====================================================================
K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
====================================================================
Provides routines for k-means clustering, generating code books
from k-means models, and quantizing vectors by comparing them with
centroids in a code book.
.. autosummary::
:toctree: generated/
whiten -- Normalize a group of observations so each feature has unit variance
vq -- Calculate code book membership of a set of observation vectors
kmeans -- Performs k-means on a set of observation vectors forming k clusters
kmeans2 -- A different implementation of k-means with more methods
-- for initializing centroids
Background information
======================
The k-means algorithm takes as input the number of clusters to
generate, k, and a set of observation vectors to cluster. It
returns a set of centroids, one for each of the k clusters. An
observation vector is classified with the cluster number or
centroid index of the centroid closest to it.
A vector v belongs to cluster i if it is closer to centroid i than
any other centroid. If v belongs to i, we say centroid i is the
dominating centroid of v. The k-means algorithm tries to
minimize distortion, which is defined as the sum of the squared distances
between each observation vector and its dominating centroid. Each
step of the k-means algorithm refines the choices of centroids to
reduce distortion. The change in distortion is used as a
stopping criterion: when the change is lower than a threshold, the
k-means algorithm is not making sufficient progress and
terminates. One can also define a maximum number of iterations.
Since vector quantization is a natural application for k-means,
information theory terminology is often used. The centroid index
or cluster index is also referred to as a "code" and the table
mapping codes to centroids and vice versa is often referred as a
"code book". The result of k-means, a set of centroids, can be
used to quantize vectors. Quantization aims to find an encoding of
vectors that reduces the expected distortion.
All routines expect obs to be a M by N array where the rows are
the observation vectors. The codebook is a k by N array where the
i'th row is the centroid of code word i. The observation vectors
and centroids have the same feature dimension.
As an example, suppose we wish to compress a 24-bit color image
(each pixel is represented by one byte for red, one for blue, and
one for green) before sending it over the web. By using a smaller
8-bit encoding, we can reduce the amount of data by two
thirds. Ideally, the colors for each of the 256 possible 8-bit
encoding values should be chosen to minimize distortion of the
color. Running k-means with k=256 generates a code book of 256
codes, which fills up all possible 8-bit sequences. Instead of
sending a 3-byte value for each pixel, the 8-bit centroid index
(or code word) of the dominating centroid is transmitted. The code
book is also sent over the wire so each 8-bit code can be
translated back to a 24-bit pixel value representation. If the
image of interest was of an ocean, we would expect many 24-bit
blues to be represented by 8-bit codes. If it was an image of a
human face, more flesh tone colors would be represented in the
code book.
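As a minimal end-to-end sketch of this workflow (the random data below is
purely illustrative):

>>> import numpy as np
>>> from scipy.cluster.vq import whiten, kmeans, vq
>>> features = np.random.rand(50, 3)        # 50 observations, 3 features
>>> obs = whiten(features)                  # unit variance per feature
>>> code_book, distortion = kmeans(obs, 3)  # code book with k = 3 codes
>>> codes, dists = vq(obs, code_book)       # nearest code for each observation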
"""
from __future__ import division, print_function, absolute_import
__docformat__ = 'restructuredtext'
__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
# TODO:
# - implements high level method for running several times k-means with
# different initialization
# - warning: what happens if a different number of clusters is given? For now,
# emit a warning, but it is not great, because I am not sure it really makes
# sense to succeed in this case (maybe an exception is better?)
import warnings
import numpy as np
from scipy._lib._util import _asarray_validated
from scipy._lib import _numpy_compat
from . import _vq
class ClusterError(Exception):
pass
def whiten(obs, check_finite=True):
"""
Normalize a group of observations on a per feature basis.
Before running k-means, it is beneficial to rescale each feature
dimension of the observation set with whitening. Each feature is
divided by its standard deviation across all observations to give
it unit variance.
Parameters
----------
obs : ndarray
Each row of the array is an observation. The
columns are the features seen during each observation.
>>> # f0 f1 f2
>>> obs = [[ 1., 1., 1.], #o0
... [ 2., 2., 2.], #o1
... [ 3., 3., 3.], #o2
... [ 4., 4., 4.]] #o3
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
result : ndarray
Contains the values in `obs` scaled by the standard deviation
of each column.
Examples
--------
>>> from scipy.cluster.vq import whiten
>>> features = np.array([[1.9, 2.3, 1.7],
... [1.5, 2.5, 2.2],
... [0.8, 0.6, 1.7,]])
>>> whiten(features)
array([[ 4.17944278, 2.69811351, 7.21248917],
[ 3.29956009, 2.93273208, 9.33380951],
[ 1.75976538, 0.7038557 , 7.21248917]])
"""
obs = _asarray_validated(obs, check_finite=check_finite)
std_dev = np.std(obs, axis=0)
zero_std_mask = std_dev == 0
if zero_std_mask.any():
std_dev[zero_std_mask] = 1.0
warnings.warn("Some columns have standard deviation zero. "
"The values of these columns will not change.",
RuntimeWarning)
return obs / std_dev
def vq(obs, code_book, check_finite=True):
"""
Assign codes from a code book to observations.
Assigns a code from a code book to each observation. Each
observation vector in the 'M' by 'N' `obs` array is compared with the
centroids in the code book and assigned the code of the closest
centroid.
The features in `obs` should have unit variance, which can be
achieved by passing them through the whiten function. The code
book can be created with the k-means algorithm or a different
encoding algorithm.
Parameters
----------
obs : ndarray
Each row of the 'M' x 'N' array is an observation. The columns are
the "features" seen during each observation. The features must be
whitened first using the whiten function or something equivalent.
code_book : ndarray
The code book is usually generated using the k-means algorithm.
Each row of the array holds a different code, and the columns are
the features of the code.
>>> # f0 f1 f2 f3
>>> code_book = [
... [ 1., 2., 3., 4.], #c0
... [ 1., 2., 3., 4.], #c1
... [ 1., 2., 3., 4.]] #c2
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
A length M array holding the code book index for each observation.
dist : ndarray
The distortion (distance) between the observation and its nearest
code.
Examples
--------
>>> from numpy import array
>>> from scipy.cluster.vq import vq
>>> code_book = array([[1.,1.,1.],
... [2.,2.,2.]])
>>> features = array([[ 1.9,2.3,1.7],
... [ 1.5,2.5,2.2],
... [ 0.8,0.6,1.7]])
>>> vq(features,code_book)
(array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239]))
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
ct = np.common_type(obs, code_book)
c_obs = obs.astype(ct, copy=False)
if code_book.dtype != ct:
c_code_book = code_book.astype(ct)
else:
c_code_book = code_book
if ct in (np.float32, np.float64):
results = _vq.vq(c_obs, c_code_book)
else:
results = py_vq(obs, code_book)
return results
def py_vq(obs, code_book, check_finite=True):
""" Python version of vq algorithm.
The algorithm computes the Euclidean distance between each
observation and every frame in the code_book.
Parameters
----------
obs : ndarray
Expects a rank 2 array. Each row is one observation.
code_book : ndarray
Code book to use. Same format as obs. Should have the same number of
features (e.g. columns) as obs.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
code[i] gives the label of the ith observation; its code is
code_book[code[i]].
min_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
Notes
-----
This function is slower than the C version but works for
all input types. If the inputs have the wrong types for the
C versions of the function, this one is called as a last resort.
It is about 20 times slower than the C version.
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
# n = number of observations
# d = number of features
if np.ndim(obs) == 1:
if not np.ndim(obs) == np.ndim(code_book):
raise ValueError(
"Observation and code_book should have the same rank")
else:
return _py_vq_1d(obs, code_book)
else:
(n, d) = np.shape(obs)
# code books and observations should have same number of features and same
# shape
if not np.ndim(obs) == np.ndim(code_book):
raise ValueError("Observation and code_book should have the same rank")
elif not d == code_book.shape[1]:
raise ValueError("Code book(%d) and obs(%d) should have the same "
"number of features (eg columns)""" %
(code_book.shape[1], d))
code = np.zeros(n, dtype=int)
min_dist = np.zeros(n)
for i in range(n):
dist = np.sum((obs[i] - code_book) ** 2, 1)
code[i] = np.argmin(dist)
min_dist[i] = dist[code[i]]
return code, np.sqrt(min_dist)
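# Hedged usage sketch (added for illustration; ``_demo_py_vq`` is not part of
# the scipy API). It relies only on the module-level ``np`` import and shows
# that the pure-Python path returns the same (code, distance) pair that the
# C-backed ``vq`` produces for float input.
def _demo_py_vq():
    obs = np.array([[1.9, 2.3, 1.7],
                    [0.8, 0.6, 1.7]])
    code_book = np.array([[1., 1., 1.],
                          [2., 2., 2.]])
    code, dist = py_vq(obs, code_book)
    # code == array([1, 0]); dist holds the Euclidean distance from each
    # observation to its chosen centroid
    return code, dist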
def _py_vq_1d(obs, code_book):
""" Python version of vq algorithm for rank 1 only.
Parameters
----------
obs : ndarray
Expects a rank 1 array. Each item is one observation.
code_book : ndarray
        Code book to use. Same format as obs. Should be rank 1 too.
Returns
-------
code : ndarray
        code[i] gives the label of the ith observation; its code is
        code_book[code[i]].
    min_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
"""
raise RuntimeError("_py_vq_1d buggy, do not use rank 1 arrays for now")
n = obs.size
nc = code_book.size
dist = np.zeros((n, nc))
for i in range(nc):
        dist[:, i] = (obs - code_book[i]) ** 2
    code = dist.argmin(axis=1)
    min_dist = dist[np.arange(n), code]
return code, np.sqrt(min_dist)
def py_vq2(obs, code_book, check_finite=True):
"""2nd Python version of vq algorithm.
    The algorithm simply computes the Euclidean distance between each
    observation and every frame in the code_book.
Parameters
----------
obs : ndarray
Expect a rank 2 array. Each row is one observation.
code_book : ndarray
        Code book to use. Same format as obs. Should have the same number of
        features (i.e., columns) as obs.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
        code[i] gives the label of the ith observation; its code is
        code_book[code[i]].
    min_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
Notes
-----
This could be faster when number of codebooks is small, but it
becomes a real memory hog when codebook is large. It requires
N by M by O storage where N=number of obs, M = number of
features, and O = number of codes.
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
d = np.shape(obs)[1]
# code books and observations should have same number of features
if not d == code_book.shape[1]:
raise ValueError("""
code book(%d) and obs(%d) should have the same
number of features (eg columns)""" % (code_book.shape[1], d))
diff = obs[np.newaxis, :, :] - code_book[:,np.newaxis,:]
dist = np.sqrt(np.sum(diff * diff, -1))
code = np.argmin(dist, 0)
min_dist = np.minimum.reduce(dist, 0)
# The next line I think is equivalent and should be faster than the one
# above, but in practice didn't seem to make much difference:
# min_dist = choose(code,dist)
return code, min_dist
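# Illustration of the memory note above (``_demo_py_vq2_memory`` is a
# hypothetical helper, not public API): the broadcasted difference
# materializes an (n_codes, n_obs, n_features) array, which is exactly the
# N by M by O storage that makes py_vq2 a memory hog for large code books.
def _demo_py_vq2_memory(n_obs=1000, n_codes=256, n_features=32):
    obs = np.random.rand(n_obs, n_features)
    code_book = np.random.rand(n_codes, n_features)
    diff = obs[np.newaxis, :, :] - code_book[:, np.newaxis, :]
    return diff.shape  # (n_codes, n_obs, n_features)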
def _kmeans(obs, guess, thresh=1e-5):
""" "raw" version of k-means.
Returns
-------
code_book
the lowest distortion codebook found.
avg_dist
        the average distance an observation is from a code in the book.
Lower means the code_book matches the data better.
See Also
--------
kmeans : wrapper around k-means
Examples
--------
Note: not whitened in this example.
>>> from numpy import array
>>> from scipy.cluster.vq import _kmeans
>>> features = array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 1.0,1.0]])
>>> book = array((features[0],features[2]))
>>> _kmeans(features,book)
(array([[ 1.7 , 2.4 ],
[ 0.73333333, 1.13333333]]), 0.40563916697728591)
"""
code_book = np.array(guess, copy=True)
avg_dist = []
diff = np.inf
while diff > thresh:
nc = code_book.shape[0]
# compute membership and distances between obs and code_book
obs_code, distort = vq(obs, code_book)
avg_dist.append(np.mean(distort, axis=-1))
# recalc code_book as centroids of associated obs
if(diff > thresh):
code_book, has_members = _vq.update_cluster_means(obs, obs_code, nc)
code_book = code_book.compress(has_members, axis=0)
if len(avg_dist) > 1:
diff = avg_dist[-2] - avg_dist[-1]
return code_book, avg_dist[-1]
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True):
"""
Performs k-means on a set of observation vectors forming k clusters.
The k-means algorithm adjusts the centroids until sufficient
progress cannot be made, i.e. the change in distortion since
the last iteration is less than some threshold. This yields
a code book mapping centroids to codes and vice versa.
Distortion is defined as the sum of the squared differences
between the observations and the corresponding centroid.
Parameters
----------
obs : ndarray
Each row of the M by N array is an observation vector. The
columns are the features seen during each observation.
The features must be whitened first with the `whiten` function.
k_or_guess : int or ndarray
The number of centroids to generate. A code is assigned to
each centroid, which is also the row index of the centroid
in the code_book matrix generated.
The initial k centroids are chosen by randomly selecting
observations from the observation matrix. Alternatively,
passing a k by N array specifies the initial k centroids.
iter : int, optional
The number of times to run k-means, returning the codebook
with the lowest distortion. This argument is ignored if
initial centroids are specified with an array for the
``k_or_guess`` parameter. This parameter does not represent the
number of iterations of the k-means algorithm.
thresh : float, optional
Terminates the k-means algorithm if the change in
distortion since the last k-means iteration is less than
or equal to thresh.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
codebook : ndarray
A k by N array of k centroids. The i'th centroid
codebook[i] is represented with the code i. The centroids
and codes generated represent the lowest distortion seen,
not necessarily the globally minimal distortion.
distortion : float
The distortion between the observations passed and the
centroids generated.
See Also
--------
kmeans2 : a different implementation of k-means clustering
with more methods for generating initial centroids but without
using a distortion change threshold as a stopping criterion.
whiten : must be called prior to passing an observation matrix
to kmeans.
Examples
--------
>>> from numpy import array
>>> from scipy.cluster.vq import vq, kmeans, whiten
>>> import matplotlib.pyplot as plt
>>> features = array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 0.1,0.1],
... [ 0.2,1.8],
... [ 2.0,0.5],
... [ 0.3,1.5],
... [ 1.0,1.0]])
>>> whitened = whiten(features)
>>> book = np.array((whitened[0],whitened[2]))
>>> kmeans(whitened,book)
(array([[ 2.3110306 , 2.86287398], # random
[ 0.93218041, 1.24398691]]), 0.85684700941625547)
>>> from numpy import random
>>> random.seed((1000,2000))
>>> codes = 3
>>> kmeans(whitened,codes)
(array([[ 2.3110306 , 2.86287398], # random
[ 1.32544402, 0.65607529],
[ 0.40782893, 2.02786907]]), 0.5196582527686241)
>>> # Create 50 datapoints in two clusters a and b
>>> pts = 50
>>> a = np.random.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts)
>>> b = np.random.multivariate_normal([30, 10],
... [[10, 2], [2, 1]],
... size=pts)
>>> features = np.concatenate((a, b))
>>> # Whiten data
>>> whitened = whiten(features)
>>> # Find 2 clusters in the data
>>> codebook, distortion = kmeans(whitened, 2)
>>> # Plot whitened data and cluster centers in red
>>> plt.scatter(whitened[:, 0], whitened[:, 1])
>>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r')
>>> plt.show()
"""
obs = _asarray_validated(obs, check_finite=check_finite)
if int(iter) < 1:
raise ValueError('iter must be at least 1.')
# Determine whether a count (scalar) or an initial guess (array) was passed.
k = None
guess = None
try:
k = int(k_or_guess)
except TypeError:
guess = _asarray_validated(k_or_guess, check_finite=check_finite)
if guess is not None:
if guess.size < 1:
raise ValueError("Asked for 0 cluster ? initial book was %s" %
guess)
result = _kmeans(obs, guess, thresh=thresh)
else:
if k != k_or_guess:
raise ValueError('if k_or_guess is a scalar, it must be an integer')
# initialize best distance value to a large value
best_dist = np.inf
No = obs.shape[0]
k = k_or_guess
if k < 1:
raise ValueError("Asked for 0 cluster ? ")
for i in range(iter):
# the initial code book is randomly selected from observations
k_random_indices = np.random.randint(0, No, k)
if np.any(_numpy_compat.unique(k_random_indices,
return_counts=True)[1] > 1):
# randint can give duplicates, which is incorrect. Only fix
# the issue if it occurs, to not change results for users who
# use a random seed and get no duplicates.
k_random_indices = np.random.permutation(No)[:k]
guess = np.take(obs, k_random_indices, 0)
book, dist = _kmeans(obs, guess, thresh=thresh)
if dist < best_dist:
best_book = book
best_dist = dist
result = best_book, best_dist
return result
def _kpoints(data, k):
"""Pick k points at random in data (one row = one observation).
    This is done by taking the first k values of a random permutation of
    0..N-1, where N is the number of observations.
Parameters
----------
data : ndarray
        Expect a rank 1 or 2 array. Rank 1 arrays are assumed to describe
        one-dimensional data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
"""
if data.ndim > 1:
n = data.shape[0]
else:
n = data.size
p = np.random.permutation(n)
    # plain row indexing also handles the rank 1 case
    x = data[p[:k]].copy()
return x
def _krandinit(data, k):
"""Returns k samples of a random variable which parameters depend on data.
More precisely, it returns k observations sampled from a Gaussian random
variable which mean and covariances are the one estimated from data.
Parameters
----------
data : ndarray
        Expect a rank 1 or 2 array. Rank 1 arrays are assumed to describe
        one-dimensional data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
"""
def init_rank1(data):
mu = np.mean(data)
cov = np.cov(data)
x = np.random.randn(k)
x *= np.sqrt(cov)
x += mu
return x
def init_rankn(data):
mu = np.mean(data, 0)
cov = np.atleast_2d(np.cov(data, rowvar=0))
# k rows, d cols (one row = one obs)
# Generate k sample of a random variable ~ Gaussian(mu, cov)
x = np.random.randn(k, mu.size)
x = np.dot(x, np.linalg.cholesky(cov).T) + mu
return x
def init_rank_def(data):
# initialize when the covariance matrix is rank deficient
mu = np.mean(data, axis=0)
_, s, vh = np.linalg.svd(data - mu, full_matrices=False)
x = np.random.randn(k, s.size)
sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1)
x = np.dot(x, sVh) + mu
return x
nd = np.ndim(data)
if nd == 1:
return init_rank1(data)
elif data.shape[1] > data.shape[0]:
return init_rank_def(data)
else:
return init_rankn(data)
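# Sketch of what _krandinit produces (illustrative only; ``_demo_krandinit``
# is not part of the module): the returned seeds are draws from a Gaussian
# with the data's estimated mean and covariance, so they land in the same
# region as the data without being actual observations.
def _demo_krandinit():
    data = np.random.randn(500, 2) * [2.0, 0.5] + [10.0, -3.0]
    seeds = _krandinit(data, 3)
    # seeds.shape == (3, 2); seeds.mean(axis=0) is close to data.mean(axis=0)
    return seeds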
_valid_init_meth = {'random': _krandinit, 'points': _kpoints}
def _missing_warn():
"""Print a warning when called."""
warnings.warn("One of the clusters is empty. "
"Re-run kmean with a different initialization.")
def _missing_raise():
"""raise a ClusterError when called."""
raise ClusterError("One of the clusters is empty. "
"Re-run kmean with a different initialization.")
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
missing='warn', check_finite=True):
"""
Classify a set of observations into k clusters using the k-means algorithm.
    The algorithm attempts to minimize the Euclidean distance between
observations and centroids. Several initialization methods are
included.
Parameters
----------
data : ndarray
A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
'M' array of 'M' one-dimensional observations.
k : int or ndarray
The number of clusters to form as well as the number of
centroids to generate. If `minit` initialization string is
        'matrix', or if an ndarray is given instead, it is
        interpreted as the initial clusters to use.
    iter : int, optional
        Number of iterations of the k-means algorithm to run. Note
        that this differs in meaning from the iter parameter of
        the kmeans function.
thresh : float, optional
(not used yet)
minit : str, optional
Method for initialization. Available methods are 'random',
'points', and 'matrix':
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
'points': choose k observations (rows) at random from data for
the initial centroids.
'matrix': interpret the k parameter as a k by M (or length k
array for one-dimensional data) array of initial centroids.
missing : str, optional
Method to deal with empty clusters. Available methods are
'warn' and 'raise':
'warn': give a warning and continue.
        'raise': raise a ClusterError and terminate the algorithm.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
centroid : ndarray
A 'k' by 'N' array of centroids found at the last iteration of
k-means.
label : ndarray
label[i] is the code or index of the centroid the
i'th observation is closest to.
"""
data = _asarray_validated(data, check_finite=check_finite)
if missing not in _valid_miss_meth:
raise ValueError("Unkown missing method: %s" % str(missing))
    # If data is rank 1, then we have a 1-dimensional problem.
nd = np.ndim(data)
if nd == 1:
d = 1
# raise ValueError("Input of rank 1 not supported yet")
elif nd == 2:
d = data.shape[1]
else:
raise ValueError("Input of rank > 2 not supported")
if np.size(data) < 1:
raise ValueError("Input has 0 items.")
# If k is not a single value, then it should be compatible with data's
# shape
if np.size(k) > 1 or minit == 'matrix':
if not nd == np.ndim(k):
raise ValueError("k is not an int and has not same rank than data")
if d == 1:
nc = len(k)
else:
(nc, dc) = k.shape
if not dc == d:
raise ValueError("k is not an int and has not same rank than\
data")
clusters = k.copy()
else:
try:
nc = int(k)
except TypeError:
raise ValueError("k (%s) could not be converted to an integer " % str(k))
if nc < 1:
raise ValueError("kmeans2 for 0 clusters ? (k was %s)" % str(k))
if not nc == k:
warnings.warn("k was not an integer, was converted.")
try:
init = _valid_init_meth[minit]
except KeyError:
raise ValueError("unknown init method %s" % str(minit))
clusters = init(data, k)
if int(iter) < 1:
raise ValueError("iter = %s is not valid. iter must be a positive integer." % iter)
return _kmeans2(data, clusters, iter, nc, _valid_miss_meth[missing])
def _kmeans2(data, code, niter, nc, missing):
""" "raw" version of kmeans2. Do not use directly.
Run k-means with a given initial codebook.
"""
for i in range(niter):
# Compute the nearest neighbour for each obs
# using the current code book
label = vq(data, code)[0]
# Update the code by computing centroids using the new code book
new_code, has_members = _vq.update_cluster_means(data, label, nc)
if not has_members.all():
missing()
# Set the empty clusters to their previous positions
new_code[~has_members] = code[~has_members]
code = new_code
return code, label
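# Appended usage sketch for kmeans2 (illustrative only; assumes the
# module-level ``np`` import). minit='points' seeds the centroids from actual
# observations, so the demo data below does not need to be whitened first.
def _demo_kmeans2():
    group_a = np.random.randn(100, 2)
    group_b = np.random.randn(100, 2) + [10.0, 10.0]
    data = np.concatenate((group_a, group_b))
    centroids, labels = kmeans2(data, 2, iter=10, minit='points')
    # centroids.shape == (2, 2); labels assigns each row of ``data`` to 0 or 1
    return centroids, labels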
| bsd-3-clause |
xubenben/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
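# Usage sketch with a non-default threshold (illustrative; this helper is not
# part of the estimator). For boolean features Var[X] = p(1 - p), so a
# threshold of .8 * (1 - .8) removes features that take the same value in at
# least 80% of the samples.
def _demo_variance_threshold():
    X = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1]]
    selector = VarianceThreshold(threshold=(.8 * (1 - .8)))
    # the first column (five zeros out of six samples) is dropped
    return selector.fit_transform(X)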
| bsd-3-clause |
shenzebang/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained by each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
amolkahat/pandas | pandas/tests/sparse/test_pivot.py | 21 | 2390 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestPivotTable(object):
def setup_method(self, method):
self.dense = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8),
'E': [np.nan, np.nan, 1, 2,
np.nan, 1, np.nan, np.nan]})
self.sparse = self.dense.to_sparse()
def test_pivot_table(self):
res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
values='C')
res_dense = pd.pivot_table(self.dense, index='A', columns='B',
values='C')
tm.assert_frame_equal(res_sparse, res_dense)
res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
values='E')
res_dense = pd.pivot_table(self.dense, index='A', columns='B',
values='E')
tm.assert_frame_equal(res_sparse, res_dense)
res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
values='E', aggfunc='mean')
res_dense = pd.pivot_table(self.dense, index='A', columns='B',
values='E', aggfunc='mean')
tm.assert_frame_equal(res_sparse, res_dense)
# ToDo: sum doesn't handle nan properly
# res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
# values='E', aggfunc='sum')
# res_dense = pd.pivot_table(self.dense, index='A', columns='B',
# values='E', aggfunc='sum')
# tm.assert_frame_equal(res_sparse, res_dense)
def test_pivot_table_multi(self):
res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
values=['D', 'E'])
res_dense = pd.pivot_table(self.dense, index='A', columns='B',
values=['D', 'E'])
tm.assert_frame_equal(res_sparse, res_dense)
| bsd-3-clause |
herilalaina/scikit-learn | sklearn/cross_decomposition/pls_.py | 7 | 31256 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.linalg import pinv2, svd
from scipy.sparse.linalg import svds
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..utils.extmath import svd_flip
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
from ..externals import six
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
    similar to the power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
# We use slower pinv2 (same as np.linalg.pinv) for stability
# reasons
X_pinv = pinv2(X, check_finite=False)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# If y_score only has zeros x_weights will only have zeros. In
# this case add an epsilon to converge to a more acceptable
# solution
if np.dot(x_weights.T, x_weights) < eps:
x_weights += eps
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = pinv2(Y, check_finite=False) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
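# Numerical sanity sketch (illustrative helper, not part of the module's
# public API): for mode "A" the NIPALS inner loop should recover the first
# left/right singular vectors of X'Y up to sign, which is what
# _svd_cross_product (defined below) computes directly.
def _demo_inner_loop_vs_svd():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    Y = rng.randn(50, 3)
    x_w, y_w, _ = _nipals_twoblocks_inner_loop(X, Y, mode="A",
                                               norm_y_weights=True)
    u, v = _svd_cross_product(X, Y)
    # taking abs() removes the sign indeterminacy; the two estimates should
    # agree to within the inner loop's tolerance
    return np.abs(x_w).ravel(), np.abs(u).ravel()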
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
    This class implements the generic PLS algorithm; its constructor parameters
    allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterate over components.
    (ii) The inner loop estimates the weight vectors. This can be done
    with two algorithms: (a) the inner loop of the original NIPALS algorithm,
    or (b) an SVD on the residual cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights : boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_ : array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy,
ensure_min_samples=2)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# Forces sign stability of x_weights and y_weights
# Sign undeterminacy issue from svd if algorithm == "svd"
# and from platform dependent computation if algorithm == 'nipals'
x_weights, y_weights = svd_flip(x_weights, y_weights.T)
y_weights = y_weights.T
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # Possible memory footprint reduction may be done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted to Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
pinv2(np.dot(self.x_loadings_.T, self.x_weights_),
check_finite=False))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
pinv2(np.dot(self.y_loadings_.T, self.y_weights_),
check_finite=False))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = self.coef_ * self.y_std_
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_ : array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
        Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining
PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`_
For each component k, find weights u, v that optimizes:
    ``max corr(Xk u, Yk v) * std(Xk u) std(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as 3 PLS packages
    provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(PLSRegression, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
    algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
Number of components to keep
scale : boolean, (default True)
Option to scale data
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
        Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS
<http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`_
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * std(Xk u) std(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
    regression, but slightly different from CCA. This is mostly used
    for modeling.
    This implementation provides the same results as the "plspm" package
    provided in the R language (R-project), using the function plsca(X, Y).
    Results are equal or collinear with the function
    ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly
    implement the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
super(PLSCanonical, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply performs an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy,
ensure_min_samples=2)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
        # let's use arpack to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = svd(C, full_matrices=False)
else:
U, s, V = svds(C, k=self.n_components)
# Deterministic output
U, V = svd_flip(U, V)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""
Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y).transform(X, y)
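# Appended usage sketch for PLSSVD (illustrative only): it mirrors the
# PLSCanonical example above but keeps a single SVD of X'Y, so the returned
# scores are simply the projections onto the first singular directions.
def _demo_plssvd():
    X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
    Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    plssvd = PLSSVD(n_components=2)
    X_scores, Y_scores = plssvd.fit(X, Y).transform(X, Y)
    return X_scores.shape, Y_scores.shape  # both (4, 2)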
| bsd-3-clause |
hvanhovell/spark | python/pyspark/sql/utils.py | 4 | 7817 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import py4j
import sys
if sys.version_info.major >= 3:
unicode = str
class CapturedException(Exception):
def __init__(self, desc, stackTrace, cause=None):
self.desc = desc
self.stackTrace = stackTrace
self.cause = convert_exception(cause) if cause is not None else None
def __str__(self):
desc = self.desc
# encode unicode instance for python2 for human readable description
if sys.version_info.major < 3 and isinstance(desc, unicode):
return str(desc.encode('utf-8'))
else:
return str(desc)
class AnalysisException(CapturedException):
"""
Failed to analyze a SQL query plan.
"""
class ParseException(CapturedException):
"""
Failed to parse a SQL command.
"""
class IllegalArgumentException(CapturedException):
"""
Passed an illegal or inappropriate argument.
"""
class StreamingQueryException(CapturedException):
"""
Exception that stopped a :class:`StreamingQuery`.
"""
class QueryExecutionException(CapturedException):
"""
Failed to execute a query.
"""
class UnknownException(CapturedException):
"""
None of the above exceptions.
"""
def convert_exception(e):
s = e.toString()
stackTrace = '\n\t at '.join(map(lambda x: x.toString(), e.getStackTrace()))
c = e.getCause()
if s.startswith('org.apache.spark.sql.AnalysisException: '):
return AnalysisException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('org.apache.spark.sql.catalyst.analysis'):
return AnalysisException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('org.apache.spark.sql.catalyst.parser.ParseException: '):
return ParseException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('org.apache.spark.sql.streaming.StreamingQueryException: '):
return StreamingQueryException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('org.apache.spark.sql.execution.QueryExecutionException: '):
return QueryExecutionException(s.split(': ', 1)[1], stackTrace, c)
if s.startswith('java.lang.IllegalArgumentException: '):
return IllegalArgumentException(s.split(': ', 1)[1], stackTrace, c)
return UnknownException(s, stackTrace, c)
def capture_sql_exception(f):
def deco(*a, **kw):
try:
return f(*a, **kw)
except py4j.protocol.Py4JJavaError as e:
converted = convert_exception(e.java_exception)
if not isinstance(converted, UnknownException):
raise converted
else:
raise
return deco
def install_exception_handler():
"""
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
When calling Java API, it will call `get_return_value` to parse the returned object.
    If any exception happens in the JVM, the result will be a Java exception object that raises
py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
could capture the Java exception and throw a Python one (with the same error message).
It's idempotent, could be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
def toJArray(gateway, jtype, arr):
"""
    Convert a Python list to a Java typed array
:param gateway: Py4j Gateway
:param jtype: java type of element in array
:param arr: python type list
"""
jarr = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarr[i] = arr[i]
return jarr
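# Usage sketch (illustrative only): toJArray needs an already-running JVM
# gateway -- e.g. the one owned by an active SparkContext -- since it cannot
# create one itself. The helper name and the String element type below are
# just an example, not part of this module.
def _demo_to_jarray(gateway):
    jarr = toJArray(gateway, gateway.jvm.java.lang.String, ["a", "b", "c"])
    return jarr  # a Java String[] that JVM-side APIs can consume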
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.23.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.12.1"
from distutils.version import LooseVersion
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
def require_test_compiled():
""" Raise Exception if test classes are not compiled
"""
import os
import glob
try:
spark_home = os.environ['SPARK_HOME']
except KeyError:
raise RuntimeError('SPARK_HOME is not defined in environment')
test_class_path = os.path.join(
spark_home, 'sql', 'core', 'target', '*', 'test-classes')
paths = glob.glob(test_class_path)
if len(paths) == 0:
raise RuntimeError(
"%s doesn't exist. Spark sql test classes are not compiled." % test_class_path)
class ForeachBatchFunction(object):
"""
This is the Python implementation of Java interface 'ForeachBatchFunction'. This wraps
the user-defined 'foreachBatch' function such that it can be called from the JVM when
the query is active.
"""
def __init__(self, sql_ctx, func):
self.sql_ctx = sql_ctx
self.func = func
def call(self, jdf, batch_id):
from pyspark.sql.dataframe import DataFrame
try:
self.func(DataFrame(jdf, self.sql_ctx), batch_id)
except Exception as e:
self.error = e
raise e
class Java:
implements = ['org.apache.spark.sql.execution.streaming.sources.PythonForeachBatchFunction']
def to_str(value):
"""
A wrapper over str(), but converts bool values to lower case strings.
If None is given, just returns None, instead of converting it to string "None".
"""
if isinstance(value, bool):
return str(value).lower()
elif value is None:
return value
else:
return str(value)
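# Quick illustration (added for clarity; not part of the module): to_str is
# used when forwarding Python options to the JVM side, which expects the
# lowercase literals "true"/"false" rather than Python's "True"/"False".
def _demo_to_str():
    return to_str(True), to_str(None), to_str(1.5)  # ('true', None, '1.5')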
| apache-2.0 |
endolith/scikit-image | doc/examples/color_exposure/plot_local_equalize.py | 14 | 2803 | """
============================
Local Histogram Equalization
============================
This example enhances an image with low contrast, using a method called *local
histogram equalization*, which spreads out the most frequent intensity values
in an image.
The equalized image [1]_ has a roughly linear cumulative distribution function
for each pixel neighborhood.
The local version [2]_ of the histogram equalization emphasizes local
graylevel variations.
References
----------
.. [1] http://en.wikipedia.org/wiki/Histogram_equalization
.. [2] http://en.wikipedia.org/wiki/Adaptive_histogram_equalization
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from skimage import data
from skimage.util.dtype import dtype_range
from skimage.util import img_as_ubyte
from skimage import exposure
from skimage.morphology import disk
from skimage.filters import rank
matplotlib.rcParams['font.size'] = 9
def plot_img_and_hist(img, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(img, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(img.ravel(), bins=bins)
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
xmin, xmax = dtype_range[img.dtype.type]
ax_hist.set_xlim(xmin, xmax)
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(img, bins)
ax_cdf.plot(bins, img_cdf, 'r')
return ax_img, ax_hist, ax_cdf
# Load an example image
img = img_as_ubyte(data.moon())
# Global equalize
img_rescale = exposure.equalize_hist(img)
# Equalization
selem = disk(30)
img_eq = rank.equalize(img, selem=selem)
# Display results
fig = plt.figure(figsize=(8, 5))
axes = np.zeros((2, 3), dtype=np.object)
axes[0, 0] = plt.subplot(2, 3, 1, adjustable='box-forced')
axes[0, 1] = plt.subplot(2, 3, 2, sharex=axes[0, 0], sharey=axes[0, 0],
adjustable='box-forced')
axes[0, 2] = plt.subplot(2, 3, 3, sharex=axes[0, 0], sharey=axes[0, 0],
adjustable='box-forced')
axes[1, 0] = plt.subplot(2, 3, 4)
axes[1, 1] = plt.subplot(2, 3, 5)
axes[1, 2] = plt.subplot(2, 3, 6)
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
ax_hist.set_ylabel('Number of pixels')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Global equalize')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Local equalize')
ax_cdf.set_ylabel('Fraction of total intensity')
# prevent overlap of y-axis labels
fig.tight_layout()
plt.show()
| bsd-3-clause |
roxyboy/bokeh | bokeh/server/blaze/config.py | 29 | 2291 | from __future__ import absolute_import
import logging
import warnings
from os.path import dirname, join
import numpy as np
import pandas as pd
from blaze import resource
import bokeh.server.tests
log = logging.getLogger(__name__)
qty=10000
gauss = {'oneA': np.random.randn(qty),
'oneB': np.random.randn(qty),
'cats': np.random.randint(0,5,size=qty),
'hundredA': np.random.randn(qty)*100,
'hundredB': np.random.randn(qty)*100}
gauss = pd.DataFrame(gauss)
uniform = {'oneA': np.random.rand(qty),
'oneB': np.random.rand(qty),
'hundredA': np.random.rand(qty)*100,
'hundredB': np.random.rand(qty)*100}
uniform = pd.DataFrame(uniform)
bivariate = {'A1': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+1]),
'A2': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+2]),
'A3': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+3]),
'A4': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+4]),
'A5': np.hstack([np.random.randn(qty/2), np.random.randn(qty/2)+5]),
'B': np.random.randn(qty),
'C': np.hstack([np.zeros(qty/2), np.ones(qty/2)])}
bivariate = pd.DataFrame(bivariate)
MESSAGE = """
Error loading hdfstore for %s.
Your version of Blaze is too old or incompatible,
or you are missing dependencies such as h5py and/or pytables.
"""
path = join(dirname(bokeh.server.tests.__file__), 'data', 'AAPL.hdf5')
try:
aapl = resource("hdfstore://%s::__data__" % path)
except Exception as e:
aapl = None
log.error(e)
warnings.warn(MESSAGE % "AAPL")
path = join(dirname(bokeh.server.tests.__file__), 'data', 'array.hdf5')
try:
arr = resource(path + "::" + "array")
except Exception as e:
arr = None
log.error(e)
warnings.warn(MESSAGE % "array")
path = join(dirname(bokeh.server.tests.__file__), 'data', 'CensusTracts.hdf5')
try:
census = resource("hdfstore://%s::__data__" % path)
except Exception as e:
census = None
log.error(e)
warnings.warn(MESSAGE % "CensusTracts")
data = dict(uniform=uniform,
gauss=gauss,
bivariate=bivariate)
if aapl:
data['aapl'] = aapl
if census:
data['census'] = census
if arr:
data['array'] = arr
| bsd-3-clause |
johnboyington/homework | ne737/prev/hw4ne737.py | 1 | 1475 | #ne737 hw4
import numpy as np
import matplotlib.pyplot as plt
###############################################################################
# PROBLEM 1
###############################################################################
i = np.array(range(38)) + 815
ci = np.array([189, 171, 182, 170, 191, 184, 176, 188, 209, 208, 251, 268, 343, 387, 473, 540, 581, 652, 655, 667, 619, 588, 487, 415, 370, 285, 267, 199, 196, 183, 158, 150, 162, 139, 160, 153, 138, 145])
m = -0.8
b = 830
B = m*i + b
plt.figure(0)
plt.title("Response of an irradiated piece of Gold Jewelry")
plt.xlabel("Channel, i")
plt.ylabel("$C_i$ (counts/channel)")
plt.xlim(815, 853)
plt.ylim(0, 700)
plt.plot(i, ci)
plt.plot(i, B)
plt.savefig('p1.png')
pk = ci - B
Cjk = np.sum(pk)
Na = 6.022E23
m = 0.003 #g
eta = 0.005
flux_t = 10 ** 12  # neutron flux (cm^-2 s^-1)
to = 3600 #s
tw = 3600 #s
tc = 3600 #s
xs = 87.46*1E-24
#still need
Ai = 197 #atomic mass of precursor isotope i in element x (g mol-1)
lam = 2.9776E-6 #decay constant of radioisotope of interest (s-1)
pi = 1 #abundance of precursor isotope i in the element of interest
fk = 1 #branching ratio of the gamma ray k in the radioisotope of interest
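# The lines below evaluate (one reading of) the standard activation-analysis
# mass relation, using the symbols defined above:
#   mux = [Cjk * Ai * lam / (Na * m * pi * fk * eta * flux_t * xs)]
#         * 1/(1 - exp(-lam*to)) * exp(lam*tw) * 1/(1 - exp(-lam*tc))
# i.e. the net peak counts corrected for saturation during irradiation (to),
# decay during the wait time (tw), and decay during the counting time (tc).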
A = Cjk * Ai * lam
Ta = (1 - np.exp(-lam * to))**(-1)
Tb = np.exp(lam * tw)
Tc = (1 - np.exp(-lam * tc))**(-1)
B = Ta * Tb * Tc
C = Na * m * pi * fk * eta * flux_t * xs
mux = (A / C) * B
print mux, mux * m
error = (((mux / Cjk)**2)*Cjk)**(0.5)
print error, error * m | gpl-3.0 |
TylerKirby/cltk | cltk/stop/classical_chinese.py | 2 | 1872 | """
Code for building and working with stoplists for Classical Chinese
"""
__author__ = ['Patrick J. Burns <[email protected]>'] # Update author list
__license__ = 'MIT License. See LICENSE.'
from cltk.stop.stop import BaseCorpusStoplist
class CorpusStoplist(BaseCorpusStoplist):
def __init__(self, language='classical_chinese'):
BaseCorpusStoplist.__init__(self, language)
self.punctuation = '。,;?:!、《》'
if not self.numpy_installed or not self.sklearn_installed:
print('\n\nThe Corpus-based Stoplist method requires numpy and scikit-learn for calculations. Try installing with `pip install numpy sklearn scipy`.\n\n')
raise ImportError
else:
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
self.vectorizer = CountVectorizer(analyzer='char', input='content') # Set df?
self.tfidf_vectorizer = TfidfVectorizer(analyzer='char', input='content')
def _remove_punctuation(self, texts, punctuation):
# Change replacement pattern for 'char' analyzer parameter
translator = str.maketrans({key: "" for key in punctuation})
texts = [text.translate(translator) for text in texts]
return texts
if __name__ == "__main__":
test_1 = """方廣錩〔題解〕《天竺國菩提達摩禪師論》,又名《達摩禪師論》,中國僧人假託禪宗初祖菩提達摩所撰典籍,著者不詳,一卷。在敦煌遺書中,"""
test_2 = """至今已經發現兩種題名為《達摩禪師論》的文獻。其一為日本橋本凝胤所藏,首殘尾存,尾題作「達摩禪師論」,係唐高宗開耀元年"""
test_corpus = [test_1, test_2]
S = CorpusStoplist()
print(S.build_stoplist(test_corpus, size=10,
basis='zou', inc_values=True))
| mit |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/numpy/lib/npyio.py | 21 | 66671 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from ._compiled_base import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError("Illegal argument")
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
try:
name = f.name
except AttributeError:
# Backward compatibility for <= 2.5
name = f.filename
mode = f.mode
f = GzipFile(fileobj=f.fileobj, filename=name)
f.mode = mode
return f
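# Illustrative use (assuming a file 'data.txt.gz' exists): the returned object
# behaves like a GzipFile with a working seek()/tell(); negative seeks are
# emulated by rewinding and re-reading, e.g.
#   fh = seek_gzip_factory('data.txt.gz')
#   fh.seek(128)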
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
else:
fid = file
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else:
# Try a pickle
try:
return pickle.load(fid)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
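# Illustrative use of savez_compressed (mirrors the savez example above):
#   np.savez_compressed('/tmp/123.npz', a=np.arange(3), b=np.ones(2))
#   npz = np.load('/tmp/123.npz')
#   npz['a']    # -> array([0, 1, 2])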
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
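# For example (illustrative): _getconv(np.dtype(np.int32)) yields a converter
# that maps the text "3.0" to the integer 3, while _getconv(np.dtype(float))
# is simply the built-in float.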
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
record data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a record
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
comments = asbytes(comments)
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
fh = iter(seek_gzip_factory(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
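    # Rough illustration of the two helpers above (not executed): for a simple
    # structured dtype such as np.dtype([('a', float), ('b', int)]),
    # flatten_dtype returns ([float64, int64], [(1, None), (1, None)]), and
    # pack_items([1.0, 2], [(1, None), (1, None)]) repacks the converted
    # values into the tuple (1.0, 2).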
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
if comments is None:
line = asbytes(line).strip(asbytes('\r\n'))
else:
line = asbytes(line).split(comments)[0].strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces the result to be preceded by + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
# list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
    `skiprows` was deprecated in numpy 1.5, and will be removed in
    numpy 2.0. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was deprecated in numpy 1.5, and will be removed in
numpy 2.0. Please use `missing_values` instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. An underscore is appended to excluded names:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
* When the variables are named (either by a flexible dtype or with `names`,
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
--------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, or generator. "
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn(
"The use of `skiprows` is deprecated, it will be removed in "
"numpy 2.0.\nPlease use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn(
"The use of `missing` is deprecated, it will be removed in "
"Numpy 2.0.\nPlease use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
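def _example_recfromcsv():
    """
    Minimal usage sketch for the convenience wrappers above; not part of the
    public API. The column names and values below are invented purely for
    illustration.
    """
    from io import BytesIO
    # recfromcsv defers to genfromtxt, so an in-memory buffer is enough.
    buf = BytesIO(asbytes("name,value\nfoo,1\nbar,2\n"))
    rec = recfromcsv(buf)
    # With the defaults set above (names=True, case_sensitive="lower"),
    # the fields come back as rec['name'] and rec['value'].
    return rec['name'], rec['value']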
| mit |
hughdbrown/QSTK-nohist | Bin/converter.py | 2 | 2914 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author:Drew Bratcher
@contact: [email protected]
@summary: Contains tutorial for backtester and report.
'''
#
# fundsToPNG.py
#
# Short script which produces a graph of funds
# over time from a pickle file.
#
# Drew Bratcher
#
from pylab import *
from qstkutil import DataAccess as da
from qstkutil import tsutil as tsu
from quicksim import quickSim
from copy import deepcopy
import math
from pandas import *
import matplotlib.pyplot as plt
import cPickle
def fundsToPNG(funds,output_file):
plt.clf()
if(type(funds)==type(list())):
for i in range(0,len(funds)):
plt.plot(funds[i].index,funds[i].values)
else:
plt.plot(funds.index,funds.values)
plt.ylabel('Fund Value')
plt.xlabel('Date')
plt.gcf().autofmt_xdate(rotation=45)
plt.draw()
savefig(output_file, format='png')
def fundsAnalysisToPNG(funds,output_file):
plt.clf()
if(type(funds)!=type(list())):
        print 'fundsmatrix only contains one timeseries, not able to analyze.'
        return
#convert to daily returns
count=list()
dates=list()
sum=list()
for i in range(0,len(funds)):
ret=tsu.daily(funds[i].values)
for j in range(0, len(ret)):
if (funds[i].index[j] in dates):
sum[dates.index(funds[i].index[j])]+=ret[j]
count[dates.index(funds[i].index[j])]+=1
else:
dates.append(funds[i].index[j])
count.append(1)
sum.append(ret[j])
#compute average
tot_ret=deepcopy(sum)
for i in range(0,len(sum)):
tot_ret[i]=sum[i]/count[i]
#compute std
std=zeros(len(sum))
for i in range(0,len(funds)):
temp=tsu.daily(funds[i].values)
for j in range(0,len(temp)):
std[dates.index(funds[i].index[j])]=0
std[dates.index(funds[i].index[j])]+=math.pow(temp[j]-tot_ret[dates.index(funds[i].index[j])],2)
for i in range(1, len(std)):
# std[i]=math.sqrt(std[i]/count[i])+std[i-1]
std[i]=math.sqrt(std[i]/count[i])
#compute total returns
lower=deepcopy(tot_ret)
upper=deepcopy(tot_ret)
tot_ret[0]=funds[0].values[0]
lower[0]=funds[0].values[0]
upper[0]=lower[0]
# for i in range(1,len(tot_ret)):
# tot_ret[i]=tot_ret[i-1]+(tot_ret[i])*tot_ret[i-1]
# lower[i]=tot_ret[i-1]-(std[i])*tot_ret[i-1]
# upper[i]=tot_ret[i-1]+(std[i])*tot_ret[i-1]
for i in range(1,len(tot_ret)):
lower[i]=(tot_ret[i]-std[i]+1)*lower[i-1]
upper[i]=(tot_ret[i]+std[i]+1)*upper[i-1]
tot_ret[i]=(tot_ret[i]+1)*tot_ret[i-1]
plt.clf()
plt.plot(dates,tot_ret)
plt.plot(dates,lower)
plt.plot(dates,upper)
plt.legend(('Tot_Ret','Lower','Upper'),loc='upper left')
plt.ylabel('Fund Total Return')
plt.ylim(ymin=0,ymax=2*tot_ret[0])
plt.draw()
savefig(output_file, format='png')
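def _example_usage():
    # Illustrative sketch, not part of the original QSTK script: drive the two
    # helpers above with a tiny hand-made fund series. The dates and values
    # are invented for illustration only.
    from datetime import datetime
    dates = [datetime(2011, 1, 3), datetime(2011, 1, 4), datetime(2011, 1, 5)]
    funds = Series([10000.0, 10100.0, 10050.0], index=dates)
    fundsToPNG(funds, 'funds.png')
    fundsAnalysisToPNG([funds, funds * 1.02], 'funds_analysis.png')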
| bsd-3-clause |
fabioticconi/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 25 | 3492 | from nose.tools import assert_equal
import numpy as np
from sklearn.utils import testing
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
def test_kw_arg():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
# Test that rounding is correct
testing.assert_array_equal(F.transform(X),
np.around(X, decimals=3))
def test_kw_arg_update():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args['decimals'] = 1
# Test that rounding is correct
testing.assert_array_equal(F.transform(X),
np.around(X, decimals=1))
def test_kw_arg_reset():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args = dict(decimals=1)
# Test that rounding is correct
testing.assert_array_equal(F.transform(X),
np.around(X, decimals=1))
def test_inverse_transform():
X = np.array([1, 4, 9, 16]).reshape((2, 2))
# Test that inverse_transform works correctly
F = FunctionTransformer(
func=np.sqrt,
inverse_func=np.around, inv_kw_args=dict(decimals=3))
testing.assert_array_equal(
F.inverse_transform(F.transform(X)),
np.around(np.sqrt(X), decimals=3))
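def test_log1p_expm1_roundtrip_sketch():
    # Illustrative sketch, not part of the original suite: np.expm1 undoes
    # np.log1p, so a transform / inverse_transform round trip should recover
    # X up to floating point error.
    X = np.arange(10).reshape((5, 2)).astype(np.float64)
    F = FunctionTransformer(func=np.log1p, inverse_func=np.expm1)
    testing.assert_array_almost_equal(F.inverse_transform(F.transform(X)), X)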
| bsd-3-clause |
Winand/pandas | pandas/tests/test_categorical.py | 1 | 189584 | # -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
from warnings import catch_warnings
import pytest
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_float_dtype,
is_integer_dtype)
import pandas as pd
import pandas.compat as compat
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame,
Timestamp, CategoricalIndex, isna,
date_range, DatetimeIndex,
period_range, PeriodIndex,
timedelta_range, TimedeltaIndex, NaT,
Interval, IntervalIndex)
from pandas.compat import range, lrange, u, PY3, PYPY
from pandas.core.config import option_context
from pandas.core.categorical import _recode_for_categories
class TestCategorical(object):
def setup_method(self, method):
self.factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
assert self.factor[0] == 'a'
assert self.factor[-1] == 'c'
subf = self.factor[[0, 1, 2]]
tm.assert_numpy_array_equal(subf._codes,
np.array([0, 1, 1], dtype=np.int8))
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_numpy_array_equal(subf._codes,
np.array([2, 2, 2], dtype=np.int8))
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"method",
[
lambda x: x.cat.set_categories([1, 2, 3]),
lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
lambda x: x.cat.rename_categories([1, 2, 3]),
lambda x: x.cat.remove_unused_categories(),
lambda x: x.cat.remove_categories([2]),
lambda x: x.cat.add_categories([4]),
lambda x: x.cat.as_ordered(),
lambda x: x.cat.as_unordered(),
])
def test_getname_categorical_accessor(self, method):
# GH 17509
s = pd.Series([1, 2, 3], name='A').astype('category')
expected = 'A'
result = method(s).name
assert result == expected
def test_getitem_category_type(self):
# GH 14580
# test iloc() on Series with Categorical data
s = pd.Series([1, 2, 3]).astype('category')
# get slice
result = s.iloc[0:2]
expected = pd.Series([1, 2]).astype('category', categories=[1, 2, 3])
tm.assert_series_equal(result, expected)
# get list of indexes
result = s.iloc[[0, 1]]
expected = pd.Series([1, 2]).astype('category', categories=[1, 2, 3])
tm.assert_series_equal(result, expected)
# get boolean array
result = s.iloc[[True, False, False]]
expected = pd.Series([1]).astype('category', categories=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
assert c[0] == 'b'
c[-1] = 'a'
assert c[-1] == 'a'
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical(['c', 'b', 'b', 'a', 'a', 'c', 'c', 'c'],
ordered=True)
tm.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
tm.assert_numpy_array_equal(result, np.array([5], dtype='int8'))
def test_constructor_empty(self):
# GH 17248
c = Categorical([])
expected = Index([])
tm.assert_index_equal(c.categories, expected)
c = Categorical([], categories=[1, 2, 3])
expected = pd.Int64Index([1, 2, 3])
tm.assert_index_equal(c.categories, expected)
def test_constructor_tuples(self):
values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)
result = Categorical(values)
expected = Index([(1,), (1, 2)], tupleize_cols=False)
tm.assert_index_equal(result.categories, expected)
assert result.ordered is False
def test_constructor_tuples_datetimes(self):
# numpy will auto reshape when all of the tuples are the
# same len, so add an extra one with 2 items and slice it off
values = np.array([(Timestamp('2010-01-01'),),
(Timestamp('2010-01-02'),),
(Timestamp('2010-01-01'),),
(Timestamp('2010-01-02'),),
('a', 'b')], dtype=object)[:-1]
result = Categorical(values)
expected = Index([(Timestamp('2010-01-01'),),
(Timestamp('2010-01-02'),)], tupleize_cols=False)
tm.assert_index_equal(result.categories, expected)
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical(arr, ordered=False)
assert not factor.ordered
# this however will raise as cannot be sorted
pytest.raises(
TypeError, lambda: Categorical(arr, ordered=True))
def test_constructor_interval(self):
result = Categorical([Interval(1, 2), Interval(2, 3), Interval(3, 6)],
ordered=True)
ii = IntervalIndex.from_intervals([Interval(1, 2),
Interval(2, 3),
Interval(3, 6)])
exp = Categorical(ii, ordered=True)
tm.assert_categorical_equal(result, exp)
tm.assert_index_equal(result.categories, ii)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
assert c1.is_dtype_equal(c1)
assert c2.is_dtype_equal(c2)
assert c3.is_dtype_equal(c3)
assert c1.is_dtype_equal(c2)
assert not c1.is_dtype_equal(c3)
assert not c1.is_dtype_equal(Index(list('aabca')))
assert not c1.is_dtype_equal(c1.astype(object))
assert c1.is_dtype_equal(CategoricalIndex(c1))
assert (c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
assert not c1.is_dtype_equal(CategoricalIndex(c1, ordered=True))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_)
c1 = Categorical(exp_arr)
tm.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
pytest.raises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
pytest.raises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
assert not c1.ordered
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())
tm.assert_index_equal(c2.categories, Index(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(["a", "b", "c", "a"]),
categories=["a", "b", "c", "d"])
tm.assert_categorical_equal(c1, c2)
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
assert is_integer_dtype(cat.categories)
# https://github.com/pandas-dev/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
assert is_integer_dtype(cat.categories)
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
assert is_float_dtype(cat.categories)
cat = pd.Categorical([np.nan, 1., 2., 3.])
assert is_float_dtype(cat.categories)
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notna()])
# assert is_integer_dtype(vals)
# corner cases
cat = pd.Categorical([1])
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
cat = pd.Categorical(["a"])
assert len(cat.categories) == 1
assert cat.categories[0] == "a"
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# Scalars should be converted to lists
cat = pd.Categorical(1)
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_not_sequence(self):
# https://github.com/pandas-dev/pandas/issues/16022
with pytest.raises(TypeError):
Categorical(['a', 'b'], categories='a')
def test_constructor_with_null(self):
# Cannot have NaN in categories
with pytest.raises(ValueError):
pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
with pytest.raises(ValueError):
pd.Categorical([None, "a", "b", "c"],
categories=[None, "a", "b", "c"])
with pytest.raises(ValueError):
pd.Categorical(DatetimeIndex(['nat', '20160101']),
categories=[NaT, Timestamp('20160101')])
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
tm.assert_categorical_equal(ci.values, Categorical(ci))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
tm.assert_categorical_equal(ci.values,
Categorical(ci.astype(object),
categories=ci.categories))
def test_constructor_with_generator(self):
# This was raising an Error in isna(single_val).any() because isna
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical(xrange(3))
tm.assert_categorical_equal(cat, exp)
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
tm.assert_categorical_equal(cat, exp)
def test_constructor_with_datetimelike(self):
# 12077
        # constructor with a datetimelike and NaT
for dtl in [pd.date_range('1995-01-01 00:00:00',
periods=5, freq='s'),
pd.date_range('1995-01-01 00:00:00',
periods=5, freq='s', tz='US/Eastern'),
pd.timedelta_range('1 day', periods=5, freq='s')]:
s = Series(dtl)
c = Categorical(s)
expected = type(dtl)(s)
expected.freq = None
tm.assert_index_equal(c.categories, expected)
tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype='int8'))
# with NaT
s2 = s.copy()
s2.iloc[-1] = pd.NaT
c = Categorical(s2)
expected = type(dtl)(s2.dropna())
expected.freq = None
tm.assert_index_equal(c.categories, expected)
exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)
tm.assert_numpy_array_equal(c.codes, exp)
result = repr(c)
assert 'NaT' in result
def test_constructor_from_index_series_datetimetz(self):
idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = pd.Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = pd.Categorical(pd.Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_timedelta(self):
idx = pd.timedelta_range('1 days', freq='D', periods=3)
result = pd.Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = pd.Categorical(pd.Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_period(self):
idx = pd.period_range('2015-01-01', freq='D', periods=3)
result = pd.Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = pd.Categorical(pd.Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_invariant(self):
# GH 14190
vals = [
np.array([1., 1.2, 1.8, np.nan]),
np.array([1, 2, 3], dtype='int64'),
['a', 'b', 'c', np.nan],
[pd.Period('2014-01'), pd.Period('2014-02'), pd.NaT],
[pd.Timestamp('2014-01-01'), pd.Timestamp('2014-01-02'), pd.NaT],
[pd.Timestamp('2014-01-01', tz='US/Eastern'),
pd.Timestamp('2014-01-02', tz='US/Eastern'), pd.NaT],
]
for val in vals:
c = Categorical(val)
c2 = Categorical(c)
tm.assert_categorical_equal(c, c2)
@pytest.mark.parametrize('ordered', [True, False])
def test_constructor_with_dtype(self, ordered):
categories = ['b', 'a', 'c']
dtype = CategoricalDtype(categories, ordered=ordered)
result = pd.Categorical(['a', 'b', 'a', 'c'], dtype=dtype)
expected = pd.Categorical(['a', 'b', 'a', 'c'], categories=categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
assert result.ordered is ordered
def test_constructor_dtype_and_others_raises(self):
dtype = CategoricalDtype(['a', 'b'], ordered=True)
with tm.assert_raises_regex(ValueError, "Cannot"):
Categorical(['a', 'b'], categories=['a', 'b'], dtype=dtype)
with tm.assert_raises_regex(ValueError, "Cannot"):
Categorical(['a', 'b'], ordered=True, dtype=dtype)
with tm.assert_raises_regex(ValueError, "Cannot"):
Categorical(['a', 'b'], ordered=False, dtype=dtype)
@pytest.mark.parametrize('categories', [
None, ['a', 'b'], ['a', 'c'],
])
@pytest.mark.parametrize('ordered', [True, False])
def test_constructor_str_category(self, categories, ordered):
result = Categorical(['a', 'b'], categories=categories,
ordered=ordered, dtype='category')
expected = Categorical(['a', 'b'], categories=categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_constructor_str_unknown(self):
with tm.assert_raises_regex(ValueError, "Unknown `dtype`"):
Categorical([1, 2], dtype="foo")
def test_constructor_from_categorical_with_dtype(self):
dtype = CategoricalDtype(['a', 'b', 'c'], ordered=True)
values = Categorical(['a', 'b', 'd'])
result = Categorical(values, dtype=dtype)
# We use dtype.categories, not values.categories
expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],
ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_with_unknown_dtype(self):
dtype = CategoricalDtype(None, ordered=True)
values = Categorical(['a', 'b', 'd'])
result = Categorical(values, dtype=dtype)
# We use values.categories, not dtype.categories
expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'd'],
ordered=True)
tm.assert_categorical_equal(result, expected)
    def test_constructor_from_categorical_string(self):
values = Categorical(['a', 'b', 'd'])
# use categories, ordered
result = Categorical(values, categories=['a', 'b', 'c'], ordered=True,
dtype='category')
expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],
ordered=True)
tm.assert_categorical_equal(result, expected)
# No string
result = Categorical(values, categories=['a', 'b', 'c'], ordered=True)
tm.assert_categorical_equal(result, expected)
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
pytest.raises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
pytest.raises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
pytest.raises(ValueError, f)
# NaN categories included
def f():
Categorical.from_codes([0, 1, 2], ["a", "b", np.nan])
pytest.raises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
pytest.raises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
tm.assert_categorical_equal(exp, res)
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
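    def test_from_codes_roundtrip_sketch(self):
        # Illustrative sketch, not part of the original suite: codes plus
        # categories are enough to reconstruct an unordered Categorical.
        c = Categorical(["a", "b", "c", "a"])
        result = Categorical.from_codes(c.codes, categories=c.categories)
        tm.assert_categorical_equal(result, c)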
def test_validate_ordered(self):
# see gh-14058
exp_msg = "'ordered' must either be 'True' or 'False'"
exp_err = TypeError
# This should be a boolean.
ordered = np.array([0, 1, 2])
with tm.assert_raises_regex(exp_err, exp_msg):
Categorical([1, 2, 3], ordered=ordered)
with tm.assert_raises_regex(exp_err, exp_msg):
Categorical.from_codes([0, 0, 1], categories=['a', 'b', 'c'],
ordered=ordered)
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
tm.assert_categorical_equal(result, expected)
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
tm.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
tm.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
tm.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
pytest.raises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
pytest.raises(TypeError, f)
# Only categories with same ordering information can be compared
        cat_unordered = cat.set_ordered(False)
        assert not (cat > cat).any()
        def f():
            cat > cat_unordered
pytest.raises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
pytest.raises(TypeError, lambda: cat > s)
pytest.raises(TypeError, lambda: cat_rev > s)
pytest.raises(TypeError, lambda: s < cat)
pytest.raises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
pytest.raises(TypeError, lambda: cat > a)
pytest.raises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
pytest.raises(TypeError, lambda: a < cat)
pytest.raises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
tm.assert_numpy_array_equal(res, exp)
def test_argsort(self):
c = Categorical([5, 3, 1, 4, 2], ordered=True)
expected = np.array([2, 4, 1, 3, 0])
tm.assert_numpy_array_equal(c.argsort(ascending=True), expected,
check_dtype=False)
expected = expected[::-1]
tm.assert_numpy_array_equal(c.argsort(ascending=False), expected,
check_dtype=False)
def test_numpy_argsort(self):
c = Categorical([5, 3, 1, 4, 2], ordered=True)
expected = np.array([2, 4, 1, 3, 0])
tm.assert_numpy_array_equal(np.argsort(c), expected,
check_dtype=False)
tm.assert_numpy_array_equal(np.argsort(c, kind='mergesort'), expected,
check_dtype=False)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.argsort,
c, axis=0)
msg = "the 'order' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.argsort,
c, order='C')
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
tm.assert_numpy_array_equal(isna(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
tm.assert_categorical_equal(factor, self.factor)
def test_set_categories_inplace(self):
cat = self.factor.copy()
cat.set_categories(['a', 'b', 'c', 'd'], inplace=True)
tm.assert_index_equal(cat.categories, pd.Index(['a', 'b', 'c', 'd']))
def test_describe(self):
# string type
desc = self.factor.describe()
assert self.factor.ordered
exp_index = pd.CategoricalIndex(['a', 'b', 'c'], name='categories',
ordered=self.factor.ordered)
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=exp_index)
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
exp_index = pd.CategoricalIndex(['a', 'b', 'c', 'd'],
ordered=self.factor.ordered,
name='categories')
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=exp_index)
tm.assert_frame_equal(desc, expected)
# check an integer one
cat = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1])
desc = cat.describe()
exp_index = pd.CategoricalIndex([1, 2, 3], ordered=cat.ordered,
name='categories')
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=exp_index)
tm.assert_frame_equal(desc, expected)
# https://github.com/pandas-dev/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
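    def test_describe_freqs_sum_to_one_sketch(self):
        # Illustrative sketch, not part of the original suite: 'freqs' is
        # 'counts' normalised by the total number of values, so it sums to 1.
        cat = Categorical(["a", "a", "b", "c"])
        desc = cat.describe()
        assert desc['counts'].sum() == 4
        assert desc['freqs'].sum() == 1.0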
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
assert actual == expected
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
assert actual == expected
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
assert actual == expected
assert expected == actual
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
assert expected == actual
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
assert expected == repr(factor)
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
assert exp == repr(a)
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
assert _rep(c) == expected
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
assert _rep(c) == expected
        # unicode option should not affect Categorical, as it doesn't care
        # about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
assert _rep(c) == expected
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip('IPython', minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; c = pd.Categorical([])"
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('c.', 1))
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.int8)
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
tm.assert_numpy_array_equal(cat1._codes, exp_arr)
tm.assert_index_equal(cat1.categories, exp_idx)
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.int8)
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
tm.assert_numpy_array_equal(cat2._codes, exp_arr)
tm.assert_index_equal(cat2.categories, exp_idx2)
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype=np.int8)
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
tm.assert_numpy_array_equal(cat3._codes, exp_arr)
tm.assert_index_equal(cat3.categories, exp_idx)
    def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1], dtype=np.int64)
s.categories = [1, 2, 3]
tm.assert_numpy_array_equal(s.__array__(), exp)
tm.assert_index_equal(s.categories, Index([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
pytest.raises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
pytest.raises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
assert not cat.ordered
cat = Categorical([0, 1, 2], ordered=False)
assert not cat.ordered
cat = Categorical([0, 1, 2], ordered=True)
assert cat.ordered
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
tm.assert_index_equal(cat1.categories, Index(['a', 'b', 'c']))
assert not cat1.ordered
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
tm.assert_index_equal(cat2.categories, Index(['b', 'c', 'a']))
assert not cat2.ordered
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
tm.assert_index_equal(cat3.categories, Index(['a', 'b', 'c']))
assert cat3.ordered
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
tm.assert_index_equal(cat4.categories, Index(['b', 'c', 'a']))
assert cat4.ordered
def test_set_dtype_same(self):
c = Categorical(['a', 'b', 'c'])
result = c._set_dtype(CategoricalDtype(['a', 'b', 'c']))
tm.assert_categorical_equal(result, c)
def test_set_dtype_new_categories(self):
c = Categorical(['a', 'b', 'c'])
result = c._set_dtype(CategoricalDtype(['a', 'b', 'c', 'd']))
tm.assert_numpy_array_equal(result.codes, c.codes)
tm.assert_index_equal(result.dtype.categories,
pd.Index(['a', 'b', 'c', 'd']))
def test_set_dtype_nans(self):
c = Categorical(['a', 'b', np.nan])
result = c._set_dtype(CategoricalDtype(['a', 'c']))
tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1],
dtype='int8'))
def test_set_categories_private(self):
cat = Categorical(['a', 'b', 'c'], categories=['a', 'b', 'c', 'd'])
cat._set_categories(['a', 'c', 'd', 'e'])
expected = Categorical(['a', 'c', 'd'], categories=list('acde'))
tm.assert_categorical_equal(cat, expected)
# fastpath
cat = Categorical(['a', 'b', 'c'], categories=['a', 'b', 'c', 'd'])
cat._set_categories(['a', 'c', 'd', 'e'], fastpath=True)
expected = Categorical(['a', 'c', 'd'], categories=list('acde'))
tm.assert_categorical_equal(cat, expected)
@pytest.mark.parametrize('values, categories, new_categories', [
# No NaNs, same cats, same order
(['a', 'b', 'a'], ['a', 'b'], ['a', 'b'],),
# No NaNs, same cats, different order
(['a', 'b', 'a'], ['a', 'b'], ['b', 'a'],),
# Same, unsorted
(['b', 'a', 'a'], ['a', 'b'], ['a', 'b'],),
# No NaNs, same cats, different order
(['b', 'a', 'a'], ['a', 'b'], ['b', 'a'],),
# NaNs
(['a', 'b', 'c'], ['a', 'b'], ['a', 'b']),
(['a', 'b', 'c'], ['a', 'b'], ['b', 'a']),
(['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
(['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
# Introduce NaNs
(['a', 'b', 'c'], ['a', 'b'], ['a']),
(['a', 'b', 'c'], ['a', 'b'], ['b']),
(['b', 'a', 'c'], ['a', 'b'], ['a']),
(['b', 'a', 'c'], ['a', 'b'], ['a']),
# No overlap
(['a', 'b', 'c'], ['a', 'b'], ['d', 'e']),
])
@pytest.mark.parametrize('ordered', [True, False])
def test_set_dtype_many(self, values, categories, new_categories,
ordered):
c = Categorical(values, categories)
expected = Categorical(values, new_categories, ordered)
result = c._set_dtype(expected.dtype)
tm.assert_categorical_equal(result, expected)
def test_set_dtype_no_overlap(self):
c = Categorical(['a', 'b', 'c'], ['d', 'e'])
result = c._set_dtype(CategoricalDtype(['a', 'b']))
expected = Categorical([None, None, None], categories=['a', 'b'])
tm.assert_categorical_equal(result, expected)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
assert not cat2.ordered
cat2 = cat.as_ordered()
assert cat2.ordered
cat2.as_unordered(inplace=True)
assert not cat2.ordered
cat2.as_ordered(inplace=True)
assert cat2.ordered
assert cat2.set_ordered(True).ordered
assert not cat2.set_ordered(False).ordered
cat2.set_ordered(True, inplace=True)
assert cat2.ordered
cat2.set_ordered(False, inplace=True)
assert not cat2.ordered
# removed in 0.19.0
msg = "can\'t set attribute"
with tm.assert_raises_regex(AttributeError, msg):
cat.ordered = True
with tm.assert_raises_regex(AttributeError, msg):
cat.ordered = False
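    def test_ordered_gates_min_max_sketch(self):
        # Illustrative sketch, not part of the original suite: min/max are
        # only defined once an ordering has been declared.
        cat = Categorical(["a", "b", "c"])
        pytest.raises(TypeError, cat.min)
        assert cat.as_ordered().min() == "a"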
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
res = cat.set_categories(["c", "b", "a"], inplace=True)
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
assert res is None
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = Index(["a", "b", "c"])
tm.assert_index_equal(res.categories, exp_categories_back)
tm.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
tm.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0],
dtype=np.int8))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
tm.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0],
dtype=np.int8))
tm.assert_index_equal(res.categories, Index(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_index_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0],
dtype=np.int8))
tm.assert_index_equal(c.categories, Index([1, 2, 3, 4]))
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(c.get_values(), exp)
# all "pointers" to '4' must be changed from 3 to 0,...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
tm.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3],
dtype=np.int8))
# categories are now in new order
tm.assert_index_equal(c.categories, Index([4, 3, 2, 1]))
# output is the same
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(c.get_values(), exp)
assert c.min() == 4
assert c.max() == 1
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
assert not c2.ordered
tm.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
assert not c2.ordered
tm.assert_numpy_array_equal(c.get_values(), c2.get_values())
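    def test_set_vs_rename_categories_sketch(self):
        # Illustrative sketch, not part of the original suite: set_categories
        # re-maps the existing values onto the new categories, while
        # rename_categories relabels the categories positionally.
        cat = Categorical(["a", "b", "a"])
        res_set = cat.set_categories(["b", "a"])
        res_ren = cat.rename_categories(["b", "a"])
        tm.assert_numpy_array_equal(res_set.__array__(),
                                    np.array(["a", "b", "a"], dtype=object))
        tm.assert_numpy_array_equal(res_ren.__array__(),
                                    np.array(["b", "a", "b"], dtype=object))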
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
tm.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1],
dtype=np.int64))
tm.assert_index_equal(res.categories, Index([1, 2, 3]))
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
exp_cat = Index(["a", "b", "c"])
tm.assert_index_equal(cat.categories, exp_cat)
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
assert res is None
tm.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1],
dtype=np.int64))
tm.assert_index_equal(cat.categories, Index([1, 2, 3]))
# Lengthen
with pytest.raises(ValueError):
cat.rename_categories([1, 2, 3, 4])
# Shorten
with pytest.raises(ValueError):
cat.rename_categories([1, 2])
def test_rename_categories_dict(self):
# GH 17336
cat = pd.Categorical(['a', 'b', 'c', 'd'])
res = cat.rename_categories({'a': 4, 'b': 3, 'c': 2, 'd': 1})
expected = Index([4, 3, 2, 1])
tm.assert_index_equal(res.categories, expected)
# Test for inplace
res = cat.rename_categories({'a': 4, 'b': 3, 'c': 2, 'd': 1},
inplace=True)
assert res is None
tm.assert_index_equal(cat.categories, expected)
# Test for dicts of smaller length
cat = pd.Categorical(['a', 'b', 'c', 'd'])
res = cat.rename_categories({'a': 1, 'c': 3})
expected = Index([1, 'b', 3, 'd'])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with bigger length
cat = pd.Categorical(['a', 'b', 'c', 'd'])
res = cat.rename_categories({'a': 1, 'b': 2, 'c': 3,
'd': 4, 'e': 5, 'f': 6})
expected = Index([1, 2, 3, 4])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with no items from old categories
cat = pd.Categorical(['a', 'b', 'c', 'd'])
res = cat.rename_categories({'f': 1, 'g': 3})
expected = Index(['a', 'b', 'c', 'd'])
tm.assert_index_equal(res.categories, expected)
@pytest.mark.parametrize('codes, old, new, expected', [
([0, 1], ['a', 'b'], ['a', 'b'], [0, 1]),
([0, 1], ['b', 'a'], ['b', 'a'], [0, 1]),
([0, 1], ['a', 'b'], ['b', 'a'], [1, 0]),
([0, 1], ['b', 'a'], ['a', 'b'], [1, 0]),
([0, 1, 0, 1], ['a', 'b'], ['a', 'b', 'c'], [0, 1, 0, 1]),
([0, 1, 2, 2], ['a', 'b', 'c'], ['a', 'b'], [0, 1, -1, -1]),
([0, 1, -1], ['a', 'b', 'c'], ['a', 'b', 'c'], [0, 1, -1]),
([0, 1, -1], ['a', 'b', 'c'], ['b'], [-1, 0, -1]),
([0, 1, -1], ['a', 'b', 'c'], ['d'], [-1, -1, -1]),
([0, 1, -1], ['a', 'b', 'c'], [], [-1, -1, -1]),
([-1, -1], [], ['a', 'b'], [-1, -1]),
([1, 0], ['b', 'a'], ['a', 'b'], [0, 1]),
])
def test_recode_to_categories(self, codes, old, new, expected):
codes = np.asanyarray(codes, dtype=np.int8)
expected = np.asanyarray(expected, dtype=np.int8)
old = Index(old)
new = Index(new)
result = _recode_for_categories(codes, old, new)
tm.assert_numpy_array_equal(result, expected)
def test_recode_to_categories_large(self):
N = 1000
codes = np.arange(N)
old = Index(codes)
expected = np.arange(N - 1, -1, -1, dtype=np.int16)
new = Index(expected)
result = _recode_for_categories(codes, old, new)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('values, categories, new_categories', [
# No NaNs, same cats, same order
(['a', 'b', 'a'], ['a', 'b'], ['a', 'b'],),
# No NaNs, same cats, different order
(['a', 'b', 'a'], ['a', 'b'], ['b', 'a'],),
# Same, unsorted
(['b', 'a', 'a'], ['a', 'b'], ['a', 'b'],),
# No NaNs, same cats, different order
(['b', 'a', 'a'], ['a', 'b'], ['b', 'a'],),
# NaNs
(['a', 'b', 'c'], ['a', 'b'], ['a', 'b']),
(['a', 'b', 'c'], ['a', 'b'], ['b', 'a']),
(['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
(['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
# Introduce NaNs
(['a', 'b', 'c'], ['a', 'b'], ['a']),
(['a', 'b', 'c'], ['a', 'b'], ['b']),
(['b', 'a', 'c'], ['a', 'b'], ['a']),
(['b', 'a', 'c'], ['a', 'b'], ['a']),
# No overlap
(['a', 'b', 'c'], ['a', 'b'], ['d', 'e']),
])
@pytest.mark.parametrize('ordered', [True, False])
def test_set_categories_many(self, values, categories, new_categories,
ordered):
c = Categorical(values, categories)
expected = Categorical(values, new_categories, ordered)
result = c.set_categories(new_categories, ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
tm.assert_categorical_equal(cat, old)
# only res is changed
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
assert res is None
tm.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
pytest.raises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
pytest.raises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
pytest.raises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
tm.assert_categorical_equal(cat, new)
assert res is None
# new is in old categories
def f():
cat.add_categories(["d"])
pytest.raises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
tm.assert_categorical_equal(res, expected)
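    def test_add_then_remove_unused_sketch(self):
        # Illustrative sketch, not part of the original suite: a category that
        # is added but never used is dropped again by
        # remove_unused_categories().
        cat = Categorical(["a", "b", "a"])
        res = cat.add_categories("c").remove_unused_categories()
        tm.assert_index_equal(res.categories, Index(["a", "b"]))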
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
tm.assert_categorical_equal(cat, new)
assert res is None
# removal is not in categories
def f():
cat.remove_categories(["c"])
pytest.raises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = Index(["a", "b", "c", "d", "e"])
exp_categories_dropped = Index(["a", "b", "c", "d"])
tm.assert_index_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
tm.assert_index_equal(res.categories, exp_categories_dropped)
tm.assert_index_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
tm.assert_index_equal(c.categories, exp_categories_dropped)
assert res is None
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
tm.assert_index_equal(res.categories,
Index(np.array(["a", "b", "c"])))
exp_codes = np.array([0, 1, 2, -1], dtype=np.int8)
tm.assert_numpy_array_equal(res.codes, exp_codes)
tm.assert_index_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
tm.assert_index_equal(out.categories, Index(['B', 'D', 'F']))
exp_codes = np.array([2, -1, 1, 0, 1, 2, -1], dtype=np.int8)
tm.assert_numpy_array_equal(out.codes, exp_codes)
assert out.get_values().tolist() == val
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
assert out.get_values().tolist() == val.tolist()
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0],
dtype=np.int8))
c[1] = np.nan
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0],
dtype=np.int8))
        # Re-creating the categorical: nan stays encoded as -1 and is not
        # added to the categories
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0],
dtype=np.int8))
def test_isna(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isna()
tm.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
tm.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
pytest.raises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
pytest.raises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
tm.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
tm.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
pytest.raises(TypeError, lambda: cat.min())
pytest.raises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == "b"
_min = cat.min(numeric_only=True)
assert _min == "c"
_max = cat.max(numeric_only=True)
assert _max == "b"
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == 1
_min = cat.min(numeric_only=True)
assert _min == 2
_max = cat.max(numeric_only=True)
assert _max == 1
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = Index(["a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, cat)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = Index(["c", "a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(exp, categories=['c', 'a', 'b'])
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = Index(["b", "a"])
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
tm.assert_categorical_equal(res, exp_cat)
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp_cat = Categorical(['b', 'a'], categories=['a', 'b'], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp_cat = Categorical(['c', 'b', 'a'], categories=['a', 'b', 'c'],
ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp_cat = Categorical(['b', 'a'], categories=['a', 'b'], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp_cat = Categorical(['b', np.nan, 'a'], categories=['a', 'b'],
ordered=True)
tm.assert_categorical_equal(res, exp_cat)
def test_unique_index_series(self):
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
# Categorical.unique sorts categories by appearance order
# if ordered=False
exp = Categorical([3, 1, 2], categories=[3, 1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(pd.Series(c).unique(), exp)
c = Categorical([1, 1, 2, 2], categories=[3, 2, 1])
exp = Categorical([1, 2], categories=[1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(pd.Series(c).unique(), exp)
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1], ordered=True)
# Categorical.unique keeps categories order if ordered=True
exp = Categorical([3, 1, 2], categories=[3, 2, 1], ordered=True)
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(pd.Series(c).unique(), exp)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 4, 3, 2, 1],
categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([5, 4], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
def test_sort_values(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
        # sort in place (inplace=True)
cat1 = cat.copy()
cat1.sort_values(inplace=True)
exp = np.array(["a", "b", "c", "d"], dtype=object)
tm.assert_numpy_array_equal(cat1.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_numpy_array_equal(res.__array__(), exp_val)
tm.assert_index_equal(res.categories, exp_categories)
def test_sort_values_na_position(self):
# see gh-12882
cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True)
exp_categories = Index([2, 5])
exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
res = cat.sort_values() # default arguments
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0])
res = cat.sort_values(ascending=True, na_position='first')
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0])
res = cat.sort_values(ascending=False, na_position='first')
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
res = cat.sort_values(ascending=True, na_position='last')
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan])
res = cat.sort_values(ascending=False, na_position='last')
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_numpy_array_equal(res.__array__(), exp_val)
tm.assert_index_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_numpy_array_equal(res.__array__(), exp_val)
tm.assert_index_equal(res.categories, exp_categories)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
assert sliced == "d"
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
tm.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
cat[1] = np.nan
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(cat, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
tm.assert_categorical_equal(sp1, xp1)
tm.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
tm.assert_categorical_equal(sn2, xp2)
tm.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
tm.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories
assert cat.nbytes == exp
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
# .categories is an index, so we include the hashtable
assert 0 < cat.nbytes <= cat.memory_usage()
assert 0 < cat.nbytes <= cat.memory_usage(deep=True)
cat = pd.Categorical(['foo', 'foo', 'bar'])
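        # object categories are only fully counted with deep=True, so the
        # deep figure exceeds nbytes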
assert cat.memory_usage(deep=True) > cat.nbytes
if not PYPY:
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
assert abs(diff) < 100
def test_searchsorted(self):
# https://github.com/pandas-dev/pandas/issues/8420
# https://github.com/pandas-dev/pandas/issues/14522
c1 = pd.Categorical(['cheese', 'milk', 'apple', 'bread', 'bread'],
categories=['cheese', 'milk', 'apple', 'bread'],
ordered=True)
s1 = pd.Series(c1)
c2 = pd.Categorical(['cheese', 'milk', 'apple', 'bread', 'bread'],
categories=['cheese', 'milk', 'apple', 'bread'],
ordered=False)
s2 = pd.Series(c2)
# Searching for single item argument, side='left' (default)
res_cat = c1.searchsorted('apple')
res_ser = s1.searchsorted('apple')
exp = np.array([2], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for single item array, side='left' (default)
res_cat = c1.searchsorted(['bread'])
res_ser = s1.searchsorted(['bread'])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for several items array, side='right'
res_cat = c1.searchsorted(['apple', 'bread'], side='right')
res_ser = s1.searchsorted(['apple', 'bread'], side='right')
exp = np.array([3, 5], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for a single value that is not from the Categorical
pytest.raises(ValueError, lambda: c1.searchsorted('cucumber'))
pytest.raises(ValueError, lambda: s1.searchsorted('cucumber'))
        # Searching for multiple values, one of which is not from the
        # Categorical
pytest.raises(ValueError,
lambda: c1.searchsorted(['bread', 'cucumber']))
pytest.raises(ValueError,
lambda: s1.searchsorted(['bread', 'cucumber']))
# searchsorted call for unordered Categorical
pytest.raises(ValueError, lambda: c2.searchsorted('apple'))
pytest.raises(ValueError, lambda: s2.searchsorted('apple'))
with tm.assert_produces_warning(FutureWarning):
res = c1.searchsorted(v=['bread'])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res, exp)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
tm.assert_numpy_array_equal(res, exp)
def test_deprecated_from_array(self):
# GH13854, `.from_array` is deprecated
with tm.assert_produces_warning(FutureWarning):
Categorical.from_array([0, 1])
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
tm.assert_numpy_array_equal(dt_cat > dt_cat[0],
np.array([False, True, True]))
tm.assert_numpy_array_equal(dt_cat[0] < dt_cat,
np.array([False, True, True]))
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
tm.assert_numpy_array_equal(cat > cat[0],
np.array([False, True, True]))
tm.assert_numpy_array_equal(cat[0] < cat,
np.array([False, True, True]))
def test_comparison_with_unknown_scalars(self):
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
        # and following: comparisons with scalars not in the categories
        # should raise for ordering comparisons (<, >), but not for == / !=
cat = pd.Categorical([1, 2, 3], ordered=True)
pytest.raises(TypeError, lambda: cat < 4)
pytest.raises(TypeError, lambda: cat > 4)
pytest.raises(TypeError, lambda: 4 < cat)
pytest.raises(TypeError, lambda: 4 > cat)
tm.assert_numpy_array_equal(cat == 4,
np.array([False, False, False]))
tm.assert_numpy_array_equal(cat != 4,
np.array([True, True, True]))
def test_map(self):
c = pd.Categorical(list('ABABC'), categories=list('CBA'),
ordered=True)
result = c.map(lambda x: x.lower())
exp = pd.Categorical(list('ababc'), categories=list('cba'),
ordered=True)
tm.assert_categorical_equal(result, exp)
c = pd.Categorical(list('ABABC'), categories=list('ABC'),
ordered=False)
result = c.map(lambda x: x.lower())
exp = pd.Categorical(list('ababc'), categories=list('abc'),
ordered=False)
tm.assert_categorical_equal(result, exp)
result = c.map(lambda x: 1)
# GH 12766: Return an index not an array
tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64)))
def test_validate_inplace(self):
cat = Categorical(['A', 'B', 'B', 'C', 'A'])
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
cat.set_ordered(value=True, inplace=value)
with pytest.raises(ValueError):
cat.as_ordered(inplace=value)
with pytest.raises(ValueError):
cat.as_unordered(inplace=value)
with pytest.raises(ValueError):
cat.set_categories(['X', 'Y', 'Z'], rename=True, inplace=value)
with pytest.raises(ValueError):
cat.rename_categories(['X', 'Y', 'Z'], inplace=value)
with pytest.raises(ValueError):
cat.reorder_categories(
['X', 'Y', 'Z'], ordered=True, inplace=value)
with pytest.raises(ValueError):
cat.add_categories(
new_categories=['D', 'E', 'F'], inplace=value)
with pytest.raises(ValueError):
cat.remove_categories(removals=['D', 'E', 'F'], inplace=value)
with pytest.raises(ValueError):
cat.remove_unused_categories(inplace=value)
with pytest.raises(ValueError):
cat.sort_values(inplace=value)
@pytest.mark.xfail(reason="Imaginary values not supported in Categorical")
def test_imaginary(self):
values = [1, 2, 3 + 1j]
c1 = pd.Categorical(values)
tm.assert_index_equal(c1.categories, pd.Index(values))
tm.assert_numpy_array_equal(np.array(c1), np.array(values))
class TestCategoricalAsBlock(object):
def setup_method(self, method):
self.factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
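        # build a frame whose 'value_group' column buckets random integers
        # into 500-wide categorical bins; used by the describe/groupby tests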
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
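        # the codes dtype widens with the number of categories
        # (int8 -> int16 -> int32)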
result = Categorical(['foo', 'bar', 'baz'])
assert result.codes.dtype == 'int8'
result = Categorical(['foo%05d' % i for i in range(400)])
assert result.codes.dtype == 'int16'
result = Categorical(['foo%05d' % i for i in range(40000)])
assert result.codes.dtype == 'int32'
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
assert result.codes.dtype == 'int8'
result = result.add_categories(['foo%05d' % i for i in range(400)])
assert result.codes.dtype == 'int16'
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
assert result.codes.dtype == 'int8'
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
assert s.dtype == 'category'
assert len(s) == len(self.factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(self.factor)
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
assert len(df) == len(self.factor)
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == 'B'
assert len(df) == len(self.factor)
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
[1, 'John P. Doe']],
columns=['person_id', 'person_name'])
        # doing this breaks transform
        x['person_name'] = pd.Categorical(x.person_name)
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
pytest.raises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
pytest.raises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
with catch_warnings(record=True):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
        # reindexing onto an index with no overlap yields an all-NaN
        # Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
        # other one, IF you specify copy=True!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
        # NaNs are represented as -1 in the codes
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(s.values.codes,
np.array([0, 1, -1, 0], dtype=np.int8))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
        assert not s.cat.ordered
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
tm.assert_categorical_equal(s.values, exp)
res = s.cat.set_categories(["b", "a"])
tm.assert_categorical_equal(res.values, exp)
s[:] = "a"
s = s.cat.remove_unused_categories()
tm.assert_index_equal(s.cat.categories, Index(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
            str(col)
def test_series_delegations(self):
# invalid accessor
pytest.raises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assert_raises_regex(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
pytest.raises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
pytest.raises(AttributeError, lambda: Series(np.arange(5.)).cat)
pytest.raises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
        # Series should delegate calls to '.categories', '.codes', '.ordered'
        # and the methods '.set_categories()' and
        # '.remove_unused_categories()' to the categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = Index(["a", "b", "c"])
tm.assert_index_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = Index([1, 2, 3])
tm.assert_index_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
assert s.cat.ordered
s = s.cat.as_unordered()
assert not s.cat.ordered
s.cat.as_ordered(inplace=True)
assert s.cat.ordered
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
s = s.cat.set_categories(["c", "b", "a"])
tm.assert_index_equal(s.cat.categories, exp_categories)
tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
tm.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = Index(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
s = s.cat.remove_unused_categories()
tm.assert_index_equal(s.cat.categories, exp_categories)
tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
tm.assert_numpy_array_equal(s.__array__(), exp_values)
        # Calling set_categories directly on the Series (rather than through
        # the .cat accessor) is a likely mistake, so test that it raises an
        # error on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
pytest.raises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = Categorical(["{0} - {1}".format(i, i + 499)
for i in range(0, 10000, 500)])
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
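        # pull out the underlying Categorical so it can be assigned as a raw
        # column below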
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), CategoricalDtype(categories=labels,
ordered=False)],
index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'),
CategoricalDtype(categories=labels, ordered=False),
CategoricalDtype(categories=labels, ordered=False)],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
tm.assert_categorical_equal(result1._data._block.values, d)
# sorting
s.name = 'E'
tm.assert_series_equal(result2.sort_index(), s.sort_index())
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
tm.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
assert exp == a.__unicode__()
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"Length: 50, dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
assert exp == repr(a)
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
assert exp == a.__unicode__()
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
assert repr(c) == exp
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
assert repr(c) == exp
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
assert repr(c) == exp
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
assert repr(c) == exp
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
assert repr(c) == exp
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
assert repr(c) == exp
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
assert repr(c) == exp
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
assert repr(c) == exp
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
assert repr(c) == exp
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
assert repr(c) == exp
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
assert repr(c) == exp
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
assert repr(c) == exp
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
assert repr(c) == exp
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
assert repr(c) == exp
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
assert repr(c) == exp
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
assert repr(c) == exp
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]""" # noqa
assert repr(c) == exp
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]""" # noqa
assert repr(c) == exp
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
assert repr(c) == exp
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa
assert repr(c) == exp
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]""" # noqa
assert repr(c) == exp
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]""" # noqa
assert repr(c) == exp
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
assert repr(c) == exp
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa
assert repr(c) == exp
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
assert repr(c) == exp
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" # noqa
assert repr(c) == exp
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]""" # noqa
assert repr(c) == exp
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]""" # noqa
assert repr(c) == exp
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
assert repr(c) == exp
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
assert repr(c) == exp
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]""" # noqa
assert repr(c) == exp
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]""" # noqa
assert repr(c) == exp
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
assert repr(s) == exp
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
assert repr(s) == exp
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
assert repr(s) == exp
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
assert repr(s) == exp
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]""" # noqa
assert repr(s) == exp
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]""" # noqa
assert repr(s) == exp
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
assert repr(s) == exp
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
assert repr(s) == exp
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]""" # noqa
assert repr(s) == exp
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
assert repr(s) == exp
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]""" # noqa
assert repr(s) == exp
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
assert repr(s) == exp
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
assert repr(s) == exp
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]""" # noqa
assert repr(s) == exp
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
assert repr(s) == exp
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]""" # noqa
assert repr(s) == exp
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa
assert repr(idx) == exp
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_period(self):
        # test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
assert repr(df) == exp
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isna()
buf = compat.StringIO()
df.info(buf=buf)
df2 = df[df['category'] == 'd']
buf = compat.StringIO()
df2.info(buf=buf)
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
pytest.raises(TypeError, lambda: cat.min())
pytest.raises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == "b"
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == 1
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([5, 4, 3, 2, 1], categories=[5, 4, 3, 2, 1],
ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
# GH 12835
cats = pd.Categorical(["a", "b", "c", "c", "c", "b"],
categories=["c", "a", "b", "d"])
s = pd.Series(cats, name='xxx')
res = s.value_counts(sort=False)
exp_index = pd.CategoricalIndex(["c", "a", "b", "d"],
categories=cats.categories)
exp = Series([3, 1, 2, 0], name='xxx', index=exp_index)
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp_index = pd.CategoricalIndex(["c", "b", "a", "d"],
categories=cats.categories)
exp = Series([3, 2, 1, 0], name='xxx', index=exp_index)
tm.assert_series_equal(res, exp)
        # check that object dtype handles the Series.name the same way
        # (tested in test_base.py)
s = pd.Series(["a", "b", "c", "c", "c", "b"], name='xxx')
res = s.value_counts()
exp = Series([3, 2, 1], name='xxx', index=["c", "b", "a"])
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# see gh-9443
# sanity check
s = pd.Series(["a", "b", "a"], dtype="category")
exp = pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"]))
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
# same Series via two different constructions --> same behaviour
series = [
pd.Series(["a", "b", None, "a", None, None], dtype="category"),
pd.Series(pd.Categorical(["a", "b", None, "a", None, None],
categories=["a", "b"]))
]
for s in series:
# None is a NaN value, so we exclude its count here
exp = pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"]))
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
# we don't exclude the count of None and sort by counts
exp = pd.Series(
[3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"]))
res = s.value_counts(dropna=False)
tm.assert_series_equal(res, exp)
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
exp = pd.Series(
[2, 1, 3], index=pd.CategoricalIndex(["a", "b", np.nan]))
res = s.value_counts(dropna=False, sort=False)
tm.assert_series_equal(res, exp)
def test_groupby(self):
cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = pd.CategoricalIndex(['a', 'b', 'c', 'd'], name='b',
ordered=True)
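        # the unobserved category 'd' still shows up in the result, with a
        # NaN mean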
expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True)
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
exp_index = pd.MultiIndex.from_product(
[Categorical(["a", "b", "z"], ordered=True),
Categorical(["c", "d", "y"], ordered=True)],
names=['A', 'B'])
expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan,
np.nan, np.nan, np.nan]},
index=exp_index)
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
exp_index = pd.MultiIndex.from_product(
[Categorical(["a", "b", "z"], ordered=True),
Categorical(["c", "d", "y"], ordered=True),
['foo', 'bar']],
names=['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=exp_index)}).sort_index()
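        # only the (A, B, C) combinations actually present in df get non-NaN
        # sums; every other product combination stays NaN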
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
[1, 'John P. Doe']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
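        # each value lands in its own bin, so group-wise transforms should
        # return the column unchanged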
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'])
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'])
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=pd.Categorical(list('abcd')))
result = df.groupby(c).apply(len)
exp_index = pd.CategoricalIndex(c.values.categories,
ordered=c.values.ordered)
expected = pd.Series([1, 0, 0, 0], index=exp_index)
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
exp_index = pd.MultiIndex.from_product(
[Categorical(["a", "b", "z"], ordered=True),
Categorical(["c", "d", "y"], ordered=True)],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
assert result == 2
def test_sort_values(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c.copy())
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=np.object_)
tm.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=np.object_)
tm.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
assert res["sort"].dtype == "category"
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
tm.assert_series_equal(res["values"], exp["values"])
assert res["sort"].dtype == "category"
assert res["unsort"].dtype == "category"
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1], dtype=np.int64)
tm.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, Interval(0, 25)], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, Interval(0, 25)], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
assert isinstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
assert is_categorical_dtype(res_col)
# single value
res_val = df.iloc[2, 0]
assert res_val == exp_val
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
assert isinstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
assert is_categorical_dtype(res_col)
# single value
res_val = df.loc["j", "cats"]
assert res_val == exp_val
# ix
# frame
# res_df = df.loc["j":"k",[0,1]] # doesn't work?
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
assert isinstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
assert is_categorical_dtype(res_col)
# single value
res_val = df.loc["j", df.columns[0]]
assert res_val == exp_val
# iat
res_val = df.iat[2, 0]
assert res_val == exp_val
# at
res_val = df.at["j", "cats"]
assert res_val == exp_val
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
assert res_val == exp_val
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
assert isinstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
assert is_categorical_dtype(res_df["cats"])
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
assert is_categorical_dtype(res_col)
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
assert is_categorical_dtype(res_df["cats"])
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
assert is_categorical_dtype(res_df["cats"])
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(["a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", df.columns[0:1]]
expected = DataFrame({'cats': Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c'])},
index=['h', 'i', 'j'])
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = pd.Categorical(["a", "a", "a", "a", "a", "a", "a"],
categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(["a", "a", "b", "a", "a", "a", "a"],
categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame({"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(["a", "a", "b", "b", "a", "a", "a"],
categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame({"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
pytest.raises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
pytest.raises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
pytest.raises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with pytest.raises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", "cats"] = "c"
pytest.raises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
pytest.raises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
pytest.raises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with pytest.raises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", df.columns[0]] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", df.columns[0]] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", df.columns[0]] = "c"
pytest.raises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
pytest.raises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
pytest.raises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", df.columns[0]] = pd.Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", df.columns[0]] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with pytest.raises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", df.columns[0]] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", df.columns[0]] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
df.loc["j":"k", df.columns[0]] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2, 0] = "c"
pytest.raises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
pytest.raises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(["a", "a", "c", "c", "a", "a", "a"],
categories=["a", "b", "c"])
idxf = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = pd.DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
# category c is kept in .categories
tm.assert_frame_equal(df, exp_fancy)
# set_value
df = orig.copy()
df.set_value("j", "cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.set_value("j", "cats", "c")
pytest.raises(ValueError, f)
        # Assigning a Categorical to parts of an int/... column uses the
        # values of the Categorical
df = pd.DataFrame({"a": [1, 1, 1, 1, 1],
"b": ["a", "a", "a", "a", "a"]})
exp = pd.DataFrame({"a": [1, "b", "b", 1, 1],
"b": ["a", "a", "b", "b", "a"]})
df.loc[1:2, "a"] = pd.Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
# Series
orig = Series(pd.Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(pd.Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]),
index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3], categories=[1, 2, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_comparisons(self):
tests_data = [(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
for data, reverse, base in tests_data:
cat_rev = pd.Series(pd.Categorical(data, categories=reverse,
ordered=True))
cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse,
ordered=True))
cat = pd.Series(pd.Categorical(data, ordered=True))
cat_base = pd.Series(pd.Categorical(
base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
            # Only categoricals with the same categories can be compared
def f():
cat > cat_rev
pytest.raises(TypeError, f)
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
pytest.raises(TypeError, lambda: cat > s)
pytest.raises(TypeError, lambda: cat_rev > s)
pytest.raises(TypeError, lambda: cat > a)
pytest.raises(TypeError, lambda: cat_rev > a)
pytest.raises(TypeError, lambda: s < cat)
pytest.raises(TypeError, lambda: s < cat_rev)
pytest.raises(TypeError, lambda: a < cat)
pytest.raises(TypeError, lambda: a < cat_rev)
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
def f():
cat > "b"
pytest.raises(TypeError, f)
cat = Series(Categorical(list("abc"), ordered=False))
def f():
cat > "b"
pytest.raises(TypeError, f)
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
pytest.raises(TypeError, lambda: cat < "d")
pytest.raises(TypeError, lambda: cat > "d")
pytest.raises(TypeError, lambda: "d" < cat)
pytest.raises(TypeError, lambda: "d" > cat)
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
# And test NaN handling...
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_cat_equality(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
assert not (a == 'a').all()
assert ((a != 'a') == ~(a == 'a')).all()
assert not ('a' == a).all()
assert (a == 'a')[0]
assert ('a' == a)[0]
assert not ('a' != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert ((~(a == e) == (a != e)).all())
assert ((~(e == a) == (e != a)).all())
assert ((~(a == f) == (a != f)).all())
assert ((~(f == a) == (f != a)).all())
# non-equality is not comparable
pytest.raises(TypeError, lambda: a < b)
pytest.raises(TypeError, lambda: b < a)
pytest.raises(TypeError, lambda: a > b)
pytest.raises(TypeError, lambda: b > a)
@pytest.mark.parametrize('ctor', [
lambda *args, **kwargs: Categorical(*args, **kwargs),
lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
])
def test_unordered_different_order_equal(self, ctor):
# https://github.com/pandas-dev/pandas/issues/16014
c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
assert (c1 == c2).all()
c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
c2 = ctor(['b', 'a'], categories=['b', 'a'], ordered=False)
assert (c1 != c2).all()
c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
c2 = ctor(['b', 'b'], categories=['b', 'a'], ordered=False)
assert (c1 != c2).all()
c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
result = c1 == c2
tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))
def test_unordered_different_categories_raises(self):
c1 = Categorical(['a', 'b'], categories=['a', 'b'], ordered=False)
c2 = Categorical(['a', 'c'], categories=['c', 'a'], ordered=False)
with tm.assert_raises_regex(TypeError,
"Categoricals can only be compared"):
c1 == c2
def test_compare_different_lengths(self):
c1 = Categorical([], categories=['a', 'b'])
c2 = Categorical([], categories=['a'])
msg = "Categories are different lengths"
with tm.assert_raises_regex(TypeError, msg):
c1 == c2
def test_concat_append(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = pd.DataFrame({"cats": list('abab'), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_concat_append_gh7864(self):
# GH 7864
        # make sure ordering is preserved
df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"])
df['grade'].cat.set_categories(['e', 'a', 'b'])
df1 = df[0:3]
df2 = df[3:]
tm.assert_index_equal(df['grade'].cat.categories,
df1['grade'].cat.categories)
tm.assert_index_equal(df['grade'].cat.categories,
df2['grade'].cat.categories)
dfx = pd.concat([df1, df2])
tm.assert_index_equal(df['grade'].cat.categories,
dfx['grade'].cat.categories)
dfa = df1.append(df2)
tm.assert_index_equal(df['grade'].cat.categories,
dfa['grade'].cat.categories)
def test_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list('abc'), dtype='category')
s2 = Series(list('abd'), dtype='category')
exp = Series(list('abcabd'))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list('abcabc'), dtype='category')
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list('abcabc'), index=[0, 1, 2, 0, 1, 2],
dtype='category')
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list('cab'))})
res = pd.concat([df2, df2])
exp = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list('cab'))})
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list('cab'))
}).set_index('B')
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list('cab'))
}).set_index('B')
tm.assert_frame_equal(result, expected)
        # wrong categories
df3 = DataFrame({'A': a,
'B': pd.Categorical(b, categories=list('abe'))
}).set_index('B')
pytest.raises(TypeError, lambda: pd.concat([df2, df3]))
def test_merge(self):
# GH 9426
right = DataFrame({'c': {0: 'a',
1: 'b',
2: 'c',
3: 'd',
4: 'e'},
'd': {0: 'null',
1: 'null',
2: 'null',
3: 'null',
4: 'null'}})
left = DataFrame({'a': {0: 'f',
1: 'f',
2: 'f',
3: 'f',
4: 'f'},
'b': {0: 'g',
1: 'g',
2: 'g',
3: 'g',
4: 'g'}})
df = pd.merge(left, right, how='left', left_on='b', right_on='c')
# object-object
expected = df.copy()
# object-cat
# note that we propagate the category
# because we don't have any matching rows
cright = right.copy()
cright['d'] = cright['d'].astype('category')
result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
expected['d'] = expected['d'].astype('category', categories=['null'])
tm.assert_frame_equal(result, expected)
# cat-object
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
def test_repeat(self):
# GH10183
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
exp = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"])
res = cat.repeat(2)
tm.assert_categorical_equal(res, exp)
def test_numpy_repeat(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
exp = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"])
tm.assert_categorical_equal(np.repeat(cat, 2), exp)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.repeat, cat, 2, axis=1)
def test_reshape(self):
cat = pd.Categorical([], categories=["a", "b"])
tm.assert_produces_warning(FutureWarning, cat.reshape, 0)
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([], categories=["a", "b"])
tm.assert_categorical_equal(cat.reshape(0), cat)
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([], categories=["a", "b"])
tm.assert_categorical_equal(cat.reshape((5, -1)), cat)
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
tm.assert_categorical_equal(cat.reshape(cat.shape), cat)
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
tm.assert_categorical_equal(cat.reshape(cat.size), cat)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = "can only specify one unknown dimension"
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
tm.assert_raises_regex(ValueError, msg, cat.reshape, (-2, -1))
def test_numpy_reshape(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
tm.assert_categorical_equal(np.reshape(cat, cat.shape), cat)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = "the 'order' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.reshape,
cat, cat.shape, order='F')
def test_na_actions(self):
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
vals = ["a", "b", np.nan, "d"]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical([1, 2, 3, 3], categories=[1, 2, 3])
vals2 = ["a", "b", "b", "d"]
df_exp_fill = pd.DataFrame({"cats": cat2, "vals": vals2})
cat3 = pd.Categorical([1, 2, 3], categories=[1, 2, 3])
vals3 = ["a", "b", np.nan]
df_exp_drop_cats = pd.DataFrame({"cats": cat3, "vals": vals3})
cat4 = pd.Categorical([1, 2], categories=[1, 2, 3])
vals4 = ["a", "b"]
df_exp_drop_all = pd.DataFrame({"cats": cat4, "vals": vals4})
# fillna
res = df.fillna(value={"cats": 3, "vals": "b"})
tm.assert_frame_equal(res, df_exp_fill)
def f():
df.fillna(value={"cats": 4, "vals": "c"})
pytest.raises(ValueError, f)
res = df.fillna(method='pad')
tm.assert_frame_equal(res, df_exp_fill)
res = df.dropna(subset=["cats"])
tm.assert_frame_equal(res, df_exp_drop_cats)
res = df.dropna()
tm.assert_frame_equal(res, df_exp_drop_all)
# make sure that fillna takes missing values into account
c = Categorical([np.nan, "b", np.nan], categories=["a", "b"])
df = pd.DataFrame({"cats": c, "vals": [1, 2, 3]})
cat_exp = Categorical(["a", "b", "a"], categories=["a", "b"])
df_exp = pd.DataFrame({"cats": cat_exp, "vals": [1, 2, 3]})
res = df.fillna("a")
tm.assert_frame_equal(res, df_exp)
# GH 14021
        # np.nan should always be a valid filler
cat = Categorical([np.nan, 2, np.nan])
val = Categorical([np.nan, np.nan, np.nan])
df = DataFrame({"cats": cat, "vals": val})
res = df.fillna(df.median())
v_exp = [np.nan, np.nan, np.nan]
df_exp = pd.DataFrame({"cats": [2, 2, 2], "vals": v_exp},
dtype='category')
tm.assert_frame_equal(res, df_exp)
result = df.cats.fillna(np.nan)
tm.assert_series_equal(result, df.cats)
result = df.vals.fillna(np.nan)
tm.assert_series_equal(result, df.vals)
idx = pd.DatetimeIndex(['2011-01-01 09:00', '2016-01-01 23:45',
'2011-01-01 09:00', pd.NaT, pd.NaT])
df = DataFrame({'a': pd.Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=pd.NaT), df)
idx = pd.PeriodIndex(['2011-01', '2011-01', '2011-01',
pd.NaT, pd.NaT], freq='M')
df = DataFrame({'a': pd.Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=pd.NaT), df)
idx = pd.TimedeltaIndex(['1 days', '2 days',
'1 days', pd.NaT, pd.NaT])
df = pd.DataFrame({'a': pd.Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=pd.NaT), df)
def test_astype_to_other(self):
s = self.cat['value_group']
expected = s
tm.assert_series_equal(s.astype('category'), expected)
tm.assert_series_equal(s.astype(CategoricalDtype()), expected)
pytest.raises(ValueError, lambda: s.astype('float64'))
cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
tm.assert_series_equal(cat.astype('str'), exp)
s2 = Series(Categorical(['1', '2', '3', '4']))
exp2 = Series([1, 2, 3, 4]).astype(int)
tm.assert_series_equal(s2.astype('int'), exp2)
        # objects don't sort correctly, so just compare that we have the same
# values
def cmp(a, b):
tm.assert_almost_equal(
np.sort(np.unique(a)), np.sort(np.unique(b)))
expected = Series(np.array(s.values), name='value_group')
cmp(s.astype('object'), expected)
cmp(s.astype(np.object_), expected)
# array conversion
tm.assert_almost_equal(np.array(s), np.array(s.values))
# valid conversion
for valid in [lambda x: x.astype('category'),
lambda x: x.astype(CategoricalDtype()),
lambda x: x.astype('object').astype('category'),
lambda x: x.astype('object').astype(
CategoricalDtype())
]:
result = valid(s)
# compare series values
# internal .categories can't be compared because it is sorted
tm.assert_series_equal(result, s, check_categorical=False)
# invalid conversion (these are NOT a dtype)
for invalid in [lambda x: x.astype(pd.Categorical),
lambda x: x.astype('object').astype(pd.Categorical)]:
pytest.raises(TypeError, lambda: invalid(s))
def test_astype_categorical(self):
cat = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
tm.assert_categorical_equal(cat, cat.astype('category'))
tm.assert_almost_equal(np.array(cat), cat.astype('object'))
pytest.raises(ValueError, lambda: cat.astype(float))
def test_to_records(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# to record array
# this coerces
result = df.to_records()
expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
dtype=[('index', '=i8'), ('0', 'O')])
tm.assert_almost_equal(result, expected)
def test_numeric_like_ops(self):
# numeric ops should not succeed
for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
pytest.raises(TypeError,
lambda: getattr(self.cat, op)(self.cat))
# reduction ops should not succeed (unless specifically defined, e.g.
# min/max)
s = self.cat['value_group']
for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']:
pytest.raises(TypeError,
lambda: getattr(s, op)(numeric_only=False))
        # mad technically works because it always takes the numeric data
# numpy ops
s = pd.Series(pd.Categorical([1, 2, 3, 4]))
pytest.raises(TypeError, lambda: np.sum(s))
# numeric ops on a Series
for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
pytest.raises(TypeError, lambda: getattr(s, op)(2))
# invalid ufunc
pytest.raises(TypeError, lambda: np.log(s))
    def test_cat_tab_completion(self):
# test the tab completion display
ok_for_cat = ['categories', 'codes', 'ordered', 'set_categories',
'add_categories', 'remove_categories',
'rename_categories', 'reorder_categories',
'remove_unused_categories', 'as_ordered', 'as_unordered']
def get_dir(s):
results = [r for r in s.cat.__dir__() if not r.startswith('_')]
return list(sorted(set(results)))
s = Series(list('aabbcde')).astype('category')
results = get_dir(s)
tm.assert_almost_equal(results, list(sorted(set(ok_for_cat))))
def test_cat_accessor_api(self):
# GH 9322
from pandas.core.categorical import CategoricalAccessor
assert Series.cat is CategoricalAccessor
s = Series(list('aabbcde')).astype('category')
assert isinstance(s.cat, CategoricalAccessor)
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .cat accessor"):
invalid.cat
assert not hasattr(invalid, 'cat')
def test_cat_accessor_no_new_attributes(self):
# https://github.com/pandas-dev/pandas/issues/10673
c = Series(list('aabbcde')).astype('category')
with tm.assert_raises_regex(AttributeError,
"You cannot add any new attribute"):
c.cat.xlabel = "a"
def test_str_accessor_api_for_categorical(self):
# https://github.com/pandas-dev/pandas/issues/10661
from pandas.core.strings import StringMethods
s = Series(list('aabb'))
s = s + " " + s
c = s.astype('category')
assert isinstance(c.str, StringMethods)
# str functions, which need special arguments
special_func_defs = [
('cat', (list("zyxw"),), {"sep": ","}),
('center', (10,), {}),
('contains', ("a",), {}),
('count', ("a",), {}),
('decode', ("UTF-8",), {}),
('encode', ("UTF-8",), {}),
('endswith', ("a",), {}),
('extract', ("([a-z]*) ",), {"expand": False}),
('extract', ("([a-z]*) ",), {"expand": True}),
('extractall', ("([a-z]*) ",), {}),
('find', ("a",), {}),
('findall', ("a",), {}),
('index', (" ",), {}),
('ljust', (10,), {}),
            ('match', ("a",), {}),  # deprecated...
('normalize', ("NFC",), {}),
('pad', (10,), {}),
('partition', (" ",), {"expand": False}), # not default
('partition', (" ",), {"expand": True}), # default
('repeat', (3,), {}),
('replace', ("a", "z"), {}),
('rfind', ("a",), {}),
('rindex', (" ",), {}),
('rjust', (10,), {}),
('rpartition', (" ",), {"expand": False}), # not default
('rpartition', (" ",), {"expand": True}), # default
('slice', (0, 1), {}),
('slice_replace', (0, 1, "z"), {}),
('split', (" ",), {"expand": False}), # default
('split', (" ",), {"expand": True}), # not default
('startswith', ("a",), {}),
('wrap', (2,), {}),
('zfill', (10,), {})
]
_special_func_names = [f[0] for f in special_func_defs]
        # * get, join: they need individual elements of type list, but
# we can't make a categorical with lists as individual categories.
# -> `s.str.split(" ").astype("category")` will error!
# * `translate` has different interfaces for py2 vs. py3
_ignore_names = ["get", "join", "translate"]
str_func_names = [f for f in dir(s.str) if not (
f.startswith("_") or
f in _special_func_names or
f in _ignore_names)]
func_defs = [(f, (), {}) for f in str_func_names]
func_defs.extend(special_func_defs)
for func, args, kwargs in func_defs:
res = getattr(c.str, func)(*args, **kwargs)
exp = getattr(s.str, func)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
else:
tm.assert_series_equal(res, exp)
invalid = Series([1, 2, 3]).astype('category')
with tm.assert_raises_regex(AttributeError,
"Can only use .str "
"accessor with string"):
invalid.str
assert not hasattr(invalid, 'str')
def test_dt_accessor_api_for_categorical(self):
# https://github.com/pandas-dev/pandas/issues/10661
from pandas.core.indexes.accessors import Properties
s_dr = Series(date_range('1/1/2015', periods=5, tz="MET"))
c_dr = s_dr.astype("category")
s_pr = Series(period_range('1/1/2015', freq='D', periods=5))
c_pr = s_pr.astype("category")
s_tdr = Series(timedelta_range('1 days', '10 days'))
c_tdr = s_tdr.astype("category")
# only testing field (like .day)
# and bool (is_month_start)
get_ops = lambda x: x._datetimelike_ops
test_data = [
("Datetime", get_ops(DatetimeIndex), s_dr, c_dr),
("Period", get_ops(PeriodIndex), s_pr, c_pr),
("Timedelta", get_ops(TimedeltaIndex), s_tdr, c_tdr)]
assert isinstance(c_dr.dt, Properties)
special_func_defs = [
('strftime', ("%Y-%m-%d",), {}),
('tz_convert', ("EST",), {}),
('round', ("D",), {}),
('floor', ("D",), {}),
('ceil', ("D",), {}),
('asfreq', ("D",), {}),
# ('tz_localize', ("UTC",), {}),
]
_special_func_names = [f[0] for f in special_func_defs]
# the series is already localized
_ignore_names = ['tz_localize', 'components']
for name, attr_names, s, c in test_data:
func_names = [f
for f in dir(s.dt)
if not (f.startswith("_") or f in attr_names or f in
_special_func_names or f in _ignore_names)]
func_defs = [(f, (), {}) for f in func_names]
for f_def in special_func_defs:
if f_def[0] in dir(s.dt):
func_defs.append(f_def)
for func, args, kwargs in func_defs:
res = getattr(c.dt, func)(*args, **kwargs)
exp = getattr(s.dt, func)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_almost_equal(res, exp)
for attr in attr_names:
try:
res = getattr(c.dt, attr)
exp = getattr(s.dt, attr)
except Exception as e:
print(name, attr)
raise e
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_almost_equal(res, exp)
invalid = Series([1, 2, 3]).astype('category')
with tm.assert_raises_regex(
AttributeError, "Can only use .dt accessor with datetimelike"):
invalid.dt
        assert not hasattr(invalid, 'dt')
def test_concat_categorical(self):
# See GH 10177
df1 = pd.DataFrame(np.arange(18, dtype='int64').reshape(6, 3),
columns=["a", "b", "c"])
df2 = pd.DataFrame(np.arange(14, dtype='int64').reshape(7, 2),
columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2['h'] = pd.Series(pd.Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True)
exp = pd.DataFrame({'a': [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
'b': [1, 4, 7, 10, 13, 16, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
'c': [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
'h': [None] * 6 + cat_values})
tm.assert_frame_equal(res, exp)
class TestCategoricalSubclassing(object):
def test_constructor(self):
sc = tm.SubclassedCategorical(['a', 'b', 'c'])
assert isinstance(sc, tm.SubclassedCategorical)
tm.assert_categorical_equal(sc, Categorical(['a', 'b', 'c']))
def test_from_array(self):
sc = tm.SubclassedCategorical.from_codes([1, 0, 2], ['a', 'b', 'c'])
assert isinstance(sc, tm.SubclassedCategorical)
exp = Categorical.from_codes([1, 0, 2], ['a', 'b', 'c'])
tm.assert_categorical_equal(sc, exp)
def test_map(self):
sc = tm.SubclassedCategorical(['a', 'b', 'c'])
res = sc.map(lambda x: x.upper())
assert isinstance(res, tm.SubclassedCategorical)
exp = Categorical(['A', 'B', 'C'])
tm.assert_categorical_equal(res, exp)
| bsd-3-clause |
wangwei7175878/tutorials | theanoTUT/theano10_regression_visualization/for_you_to_practice.py | 3 | 2012 | # View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 10 - visualize result
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
class Layer(object):
def __init__(self, inputs, in_size, out_size, activation_function=None):
self.W = theano.shared(np.random.normal(0, 1, (in_size, out_size)))
self.b = theano.shared(np.zeros((out_size, )) + 0.1)
self.Wx_plus_b = T.dot(inputs, self.W) + self.b
self.activation_function = activation_function
if activation_function is None:
self.outputs = self.Wx_plus_b
else:
self.outputs = self.activation_function(self.Wx_plus_b)
# Make up some fake data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise # y = x^2 - 0.5
# show the fake data
# plt.scatter(x_data, y_data)
# plt.show()
# determine the inputs dtype
x = T.dmatrix("x")
y = T.dmatrix("y")
# add layers
l1 = Layer(x, 1, 10, T.nnet.relu)
l2 = Layer(l1.outputs, 10, 1, None)
# compute the cost
cost = T.mean(T.square(l2.outputs - y))
# compute the gradients
gW1, gb1, gW2, gb2 = T.grad(cost, [l1.W, l1.b, l2.W, l2.b])
# apply gradient descent
learning_rate = 0.05
train = theano.function(
inputs=[x, y],
outputs=[cost],
updates=[(l1.W, l1.W - learning_rate * gW1),
(l1.b, l1.b - learning_rate * gb1),
(l2.W, l2.W - learning_rate * gW2),
(l2.b, l2.b - learning_rate * gb2)])
# prediction
predict = theano.function(inputs=[x], outputs=l2.outputs)
# plot the real data
for i in range(1000):
# training
err = train(x_data, y_data)
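# A possible visualization sketch for the exercise above, once training has
# finished (just one option; the tutorial leaves the actual plotting to you):
#
#     prediction_value = predict(x_data)          # network output on the inputs
#     plt.scatter(x_data, y_data)                  # the noisy training data
#     plt.plot(x_data, prediction_value, 'r-', lw=5)   # fitted curve
#     plt.show()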
| mit |
cjayb/mne-python | mne/time_frequency/tfr.py | 2 | 95165 | """A module which implements the time-frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <[email protected]>
# Hari Bharadwaj <[email protected]>
# Clement Moutard <[email protected]>
# Jean-Remi King <[email protected]>
#
# License : BSD (3-clause)
from copy import deepcopy
from functools import partial
from math import sqrt
import numpy as np
from scipy import linalg
from .multitaper import dpss_windows
from ..baseline import rescale
from ..fixes import fft, ifft
from ..parallel import parallel_func
from ..utils import (logger, verbose, _time_mask, _freq_mask, check_fname,
sizeof_fmt, GetEpochsMixin, _prepare_read_metadata,
fill_doc, _prepare_write_metadata, _check_event_id,
_gen_events, SizeMixin, _is_numeric, _check_option,
_validate_type)
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..channels.layout import _merge_ch_data, _pair_grad_sensors
from ..io.pick import (pick_info, _picks_to_idx, channel_type, _pick_inst,
_get_channel_types)
from ..io.meas_info import Info
from ..viz.utils import (figure_nobar, plt_show, _setup_cmap, warn,
_connection_line, _prepare_joint_axes,
_setup_vmin_vmax, _set_title_multiple_electrodes)
from ..externals.h5io import write_hdf5, read_hdf5
def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False):
"""Compute Morlet wavelets for the given frequency range.
Parameters
----------
sfreq : float
        The sampling frequency.
freqs : array
Frequency range of interest (1 x Frequencies).
n_cycles : float | array of float, default 7.0
Number of cycles. Fixed number or one per frequency.
sigma : float, default None
        It controls the width of the wavelet, i.e., its temporal
        resolution. If sigma is None, the temporal resolution adapts
        with the frequency, as in a standard wavelet transform: the
        higher the frequency, the shorter the wavelet. If sigma is
        fixed, the temporal resolution is fixed, as in the short-time
        Fourier transform, and the number of oscillations increases
        with the frequency.
zero_mean : bool, default False
Make sure the wavelet has a mean of zero.
Returns
-------
Ws : list of array
The wavelets time series.
"""
Ws = list()
n_cycles = np.atleast_1d(n_cycles)
freqs = np.array(freqs)
if np.any(freqs <= 0):
raise ValueError("all frequencies in 'freqs' must be "
"greater than 0.")
if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
# fixed or scale-dependent window
if sigma is None:
sigma_t = this_n_cycles / (2.0 * np.pi * f)
else:
sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
# this scaling factor is proportional to (Tallon-Baudry 98):
# (sigma_t*sqrt(pi))^(-1/2);
t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
t = np.r_[-t[::-1], t[1:]]
oscillation = np.exp(2.0 * 1j * np.pi * f * t)
gaussian_enveloppe = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
if zero_mean: # to make it zero mean
real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
oscillation -= real_offset
W = oscillation * gaussian_enveloppe
W /= sqrt(0.5) * linalg.norm(W.ravel())
Ws.append(W)
return Ws
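# A minimal usage sketch for ``morlet`` (the sampling rate, frequencies, and
# cycle count below are arbitrary, illustrative values):
#
#     Ws = morlet(sfreq=1000., freqs=[10., 20.], n_cycles=7.)
#     len(Ws)      # -> 2, one wavelet array per requested frequency
#     Ws[0].dtype  # -> complex128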
def _make_dpss(sfreq, freqs, n_cycles=7., time_bandwidth=4.0, zero_mean=False):
"""Compute DPSS tapers for the given frequency range.
Parameters
----------
sfreq : float
The sampling frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,), default 7.
The number of cycles globally or for each frequency.
time_bandwidth : float, default 4.0
Time x Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1).
Default is 4.0, giving 3 good tapers.
    zero_mean : bool, default False
Make sure the wavelet has a mean of zero.
Returns
-------
Ws : list of array
The wavelets time series.
"""
Ws = list()
freqs = np.array(freqs)
if np.any(freqs <= 0):
raise ValueError("all frequencies in 'freqs' must be "
"greater than 0.")
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for m in range(n_taps):
Wm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0., t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
# Get dpss tapers
tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
n_taps)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
Wm.append(Wk)
Ws.append(Wm)
return Ws
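# A minimal sketch for the helper above (illustrative values): with the
# default time_bandwidth=4.0 there are floor(4.0 - 1) = 3 tapers, so the
# result is indexed as Ws[taper][freq]:
#
#     Ws = _make_dpss(sfreq=1000., freqs=[10., 20.], n_cycles=7.,
#                     time_bandwidth=4.0)
#     len(Ws), len(Ws[0])  # -> (3, 2)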
# Low level convolution
def _cwt(X, Ws, mode="same", decim=1, use_fft=True):
"""Compute cwt with fft based convolutions or temporal convolutions.
Parameters
----------
X : array of shape (n_signals, n_times)
The data.
Ws : list of array
Wavelets time series.
mode : {'full', 'valid', 'same'}
See numpy.convolve.
decim : int | slice, default 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note:: Decimation may create aliasing artifacts.
use_fft : bool, default True
Use the FFT for convolutions or not.
Returns
-------
out : array, shape (n_signals, n_freqs, n_time_decim)
The time-frequency transform of the signals.
"""
_check_option('mode', mode, ['same', 'valid', 'full'])
decim = _check_decim(decim)
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
n_signals, n_times = X.shape
n_times_out = X[:, decim].shape[1]
n_freqs = len(Ws)
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
fsize = 2 ** int(np.ceil(np.log2(size)))
# precompute FFTs of Ws
if use_fft:
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
warn_me = True
for i, W in enumerate(Ws):
if use_fft:
fft_Ws[i] = fft(W, fsize)
if len(W) > n_times and warn_me:
msg = ('At least one of the wavelets is longer than the signal. '
'Consider padding the signal or using shorter wavelets.')
if use_fft:
warn(msg, UserWarning)
warn_me = False # Suppress further warnings
else:
raise ValueError(msg)
# Make generator looping across signals
tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128)
for x in X:
if use_fft:
fft_x = fft(x, fsize)
# Loop across wavelets
for ii, W in enumerate(Ws):
if use_fft:
ret = ifft(fft_x * fft_Ws[ii])[:n_times + W.size - 1]
else:
ret = np.convolve(x, W, mode=mode)
# Center and decimate decomposition
if mode == 'valid':
sz = int(abs(W.size - n_times)) + 1
offset = (n_times - sz) // 2
this_slice = slice(offset // decim.step,
(offset + sz) // decim.step)
if use_fft:
ret = _centered(ret, sz)
tfr[ii, this_slice] = ret[decim]
elif mode == 'full' and not use_fft:
start = (W.size - 1) // 2
end = len(ret) - (W.size // 2)
ret = ret[start:end]
tfr[ii, :] = ret[decim]
else:
if use_fft:
ret = _centered(ret, n_times)
tfr[ii, :] = ret[decim]
yield tfr
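# A minimal consumption sketch for the generator above (illustrative shapes).
# Note that the same ``tfr`` buffer is reused for every signal, so copy each
# yielded array if it must outlive its iteration:
#
#     X = np.random.RandomState(0).randn(3, 2000)        # 3 signals
#     Ws = morlet(1000., freqs=[10., 20.], n_cycles=7.)
#     out = np.array([tfr.copy() for tfr in _cwt(X, Ws, decim=2)])
#     out.shape  # -> (3, 2, 1000): signals x freqs x decimated times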
# Loop of convolution: single trial
def _compute_tfr(epoch_data, freqs, sfreq=1.0, method='morlet',
n_cycles=7.0, zero_mean=None, time_bandwidth=None,
use_fft=True, decim=1, output='complex', n_jobs=1,
verbose=None):
"""Compute time-frequency transforms.
Parameters
----------
epoch_data : array of shape (n_epochs, n_channels, n_times)
The epochs.
freqs : array-like of floats, shape (n_freqs)
The frequencies.
sfreq : float | int, default 1.0
Sampling frequency of the data.
method : 'multitaper' | 'morlet', default 'morlet'
The time-frequency method. 'morlet' convolves a Morlet wavelet.
'multitaper' uses Morlet wavelets windowed with multiple DPSS
multitapers.
n_cycles : float | array of float, default 7.0
Number of cycles in the Morlet wavelet. Fixed number
or one per frequency.
zero_mean : bool | None, default None
None means True for method='multitaper' and False for method='morlet'.
If True, make sure the wavelets have a mean of zero.
time_bandwidth : float, default None
If None and method=multitaper, will be set to 4.0 (3 tapers).
Time x (Full) Bandwidth product. Only applies if
method == 'multitaper'. The number of good tapers (low-bias) is
chosen automatically based on this to equal floor(time_bandwidth - 1).
use_fft : bool, default True
Use the FFT for convolutions or not.
decim : int | slice, default 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note::
Decimation may create aliasing artifacts, yet decimation
is done after the convolutions.
output : str, default 'complex'
* 'complex' : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
* 'avg_power' : average of single trial power.
* 'itc' : inter-trial coherence.
* 'avg_power_itc' : average of single trial power and inter-trial
coherence across trials.
%(n_jobs)s
        The parallelization is implemented across channels, so this is
        the number of channels processed at the same time.
%(verbose)s
Returns
-------
out : array
Time frequency transform of epoch_data. If output is in ['complex',
'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
n_times), else it is (n_chans, n_freqs, n_times). If output is
'avg_power_itc', the real values code for 'avg_power' and the
imaginary values code for the 'itc': out = avg_power + i * itc
"""
# Check data
epoch_data = np.asarray(epoch_data)
if epoch_data.ndim != 3:
raise ValueError('epoch_data must be of shape (n_epochs, n_chans, '
'n_times), got %s' % (epoch_data.shape,))
# Check params
freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim = \
_check_tfr_param(freqs, sfreq, method, zero_mean, n_cycles,
time_bandwidth, use_fft, decim, output)
decim = _check_decim(decim)
if (freqs > sfreq / 2.).any():
raise ValueError('Cannot compute freq above Nyquist freq of the data '
'(%0.1f Hz), got %0.1f Hz'
% (sfreq / 2., freqs.max()))
# We decimate *after* decomposition, so we need to create our kernels
# for the original sfreq
if method == 'morlet':
W = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
Ws = [W] # to have same dimensionality as the 'multitaper' case
elif method == 'multitaper':
Ws = _make_dpss(sfreq, freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, zero_mean=zero_mean)
# Check wavelets
if len(Ws[0][0]) > epoch_data.shape[2]:
raise ValueError('At least one of the wavelets is longer than the '
'signal. Use a longer signal or shorter wavelets.')
# Initialize output
n_freqs = len(freqs)
n_epochs, n_chans, n_times = epoch_data[:, :, decim].shape
if output in ('power', 'phase', 'avg_power', 'itc'):
dtype = np.float64
elif output in ('complex', 'avg_power_itc'):
# avg_power_itc is stored as power + 1i * itc to keep a
# simple dimensionality
dtype = np.complex128
if ('avg_' in output) or ('itc' in output):
out = np.empty((n_chans, n_freqs, n_times), dtype)
else:
out = np.empty((n_chans, n_epochs, n_freqs, n_times), dtype)
# Parallel computation
parallel, my_cwt, _ = parallel_func(_time_frequency_loop, n_jobs)
# Parallelization is applied across channels.
tfrs = parallel(
my_cwt(channel, Ws, output, use_fft, 'same', decim)
for channel in epoch_data.transpose(1, 0, 2))
# FIXME: to avoid overheads we should use np.array_split()
for channel_idx, tfr in enumerate(tfrs):
out[channel_idx] = tfr
if ('avg_' not in output) and ('itc' not in output):
# This is to enforce that the first dimension is for epochs
out = out.transpose(1, 0, 2, 3)
return out
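# A minimal sketch of calling the function above on fake data (illustrative
# shapes and frequencies, not a recommended analysis setting):
#
#     rng = np.random.RandomState(42)
#     data = rng.randn(5, 4, 1000)               # epochs x channels x times
#     power = _compute_tfr(data, freqs=[8., 12.], sfreq=250.,
#                          method='morlet', output='avg_power')
#     power.shape  # -> (4, 2, 1000): channels x freqs x times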
def _check_tfr_param(freqs, sfreq, method, zero_mean, n_cycles,
time_bandwidth, use_fft, decim, output):
"""Aux. function to _compute_tfr to check the params validity."""
# Check freqs
if not isinstance(freqs, (list, np.ndarray)):
raise ValueError('freqs must be an array-like, got %s '
'instead.' % type(freqs))
freqs = np.asarray(freqs, dtype=float)
if freqs.ndim != 1:
raise ValueError('freqs must be of shape (n_freqs,), got %s '
'instead.' % np.array(freqs.shape))
# Check sfreq
if not isinstance(sfreq, (float, int)):
raise ValueError('sfreq must be a float or an int, got %s '
'instead.' % type(sfreq))
sfreq = float(sfreq)
# Default zero_mean = True if multitaper else False
zero_mean = method == 'multitaper' if zero_mean is None else zero_mean
if not isinstance(zero_mean, bool):
        raise ValueError('zero_mean should be of type bool, got %s instead.'
                         % type(zero_mean))
freqs = np.asarray(freqs)
if (method == 'multitaper') and (output == 'phase'):
raise NotImplementedError(
'This function is not optimized to compute the phase using the '
'multitaper method. Use np.angle of the complex output instead.')
# Check n_cycles
if isinstance(n_cycles, (int, float)):
n_cycles = float(n_cycles)
elif isinstance(n_cycles, (list, np.ndarray)):
n_cycles = np.array(n_cycles)
if len(n_cycles) != len(freqs):
raise ValueError('n_cycles must be a float or an array of length '
'%i frequencies, got %i cycles instead.' %
(len(freqs), len(n_cycles)))
else:
raise ValueError('n_cycles must be a float or an array, got %s '
'instead.' % type(n_cycles))
# Check time_bandwidth
if (method == 'morlet') and (time_bandwidth is not None):
raise ValueError('time_bandwidth only applies to "multitaper" method.')
elif method == 'multitaper':
time_bandwidth = (4.0 if time_bandwidth is None
else float(time_bandwidth))
# Check use_fft
if not isinstance(use_fft, bool):
raise ValueError('use_fft must be a boolean, got %s '
'instead.' % type(use_fft))
# Check decim
if isinstance(decim, int):
decim = slice(None, None, decim)
if not isinstance(decim, slice):
raise ValueError('decim must be an integer or a slice, '
'got %s instead.' % type(decim))
# Check output
_check_option('output', output, ['complex', 'power', 'phase',
'avg_power_itc', 'avg_power', 'itc'])
_check_option('method', method, ['multitaper', 'morlet'])
return freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim
def _time_frequency_loop(X, Ws, output, use_fft, mode, decim):
"""Aux. function to _compute_tfr.
Loops time-frequency transform across wavelets and epochs.
Parameters
----------
X : array, shape (n_epochs, n_times)
The epochs data of a single channel.
Ws : list, shape (n_tapers, n_wavelets, n_times)
The wavelets.
output : str
* 'complex' : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
* 'avg_power' : average of single trial power.
* 'itc' : inter-trial coherence.
* 'avg_power_itc' : average of single trial power and inter-trial
coherence across trials.
use_fft : bool
Use the FFT for convolutions or not.
mode : {'full', 'valid', 'same'}
See numpy.convolve.
decim : slice
The decimation slice: e.g. power[:, decim]
"""
# Set output type
dtype = np.float64
if output in ['complex', 'avg_power_itc']:
dtype = np.complex128
# Init outputs
decim = _check_decim(decim)
n_epochs, n_times = X[:, decim].shape
n_freqs = len(Ws[0])
if ('avg_' in output) or ('itc' in output):
tfrs = np.zeros((n_freqs, n_times), dtype=dtype)
else:
tfrs = np.zeros((n_epochs, n_freqs, n_times), dtype=dtype)
# Loops across tapers.
for W in Ws:
coefs = _cwt(X, W, mode, decim=decim, use_fft=use_fft)
# Inter-trial phase locking is apparently computed per taper...
if 'itc' in output:
plf = np.zeros((n_freqs, n_times), dtype=np.complex128)
# Loop across epochs
for epoch_idx, tfr in enumerate(coefs):
# Transform complex values
if output in ['power', 'avg_power']:
tfr = (tfr * tfr.conj()).real # power
elif output == 'phase':
tfr = np.angle(tfr)
elif output == 'avg_power_itc':
tfr_abs = np.abs(tfr)
plf += tfr / tfr_abs # phase
tfr = tfr_abs ** 2 # power
elif output == 'itc':
plf += tfr / np.abs(tfr) # phase
                continue  # no need to stack anything other than plf
# Stack or add
if ('avg_' in output) or ('itc' in output):
tfrs += tfr
else:
tfrs[epoch_idx] += tfr
# Compute inter trial coherence
if output == 'avg_power_itc':
tfrs += 1j * np.abs(plf)
elif output == 'itc':
tfrs += np.abs(plf)
# Normalization of average metrics
if ('avg_' in output) or ('itc' in output):
tfrs /= n_epochs
    # Normalization by number of tapers
tfrs /= len(Ws)
return tfrs
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
"""Compute time freq decomposition with continuous wavelet transform.
Parameters
----------
X : array, shape (n_signals, n_times)
The signals.
Ws : list of array
Wavelets time series.
use_fft : bool
Use FFT for convolutions. Defaults to True.
mode : 'same' | 'valid' | 'full'
Convention for convolution. 'full' is currently not implemented with
``use_fft=False``. Defaults to ``'same'``.
decim : int | slice
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note:: Decimation may create aliasing artifacts.
Defaults to 1.
Returns
-------
tfr : array, shape (n_signals, n_freqs, n_times)
The time-frequency decompositions.
See Also
--------
mne.time_frequency.tfr_morlet : Compute time-frequency decomposition
with Morlet wavelets.
"""
decim = _check_decim(decim)
n_signals, n_times = X[:, decim].shape
coefs = _cwt(X, Ws, mode, decim=decim, use_fft=use_fft)
tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex128)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
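# A minimal usage sketch for ``cwt`` (kept in comments so importing this
# module stays side-effect free). The Morlet wavelets come from the ``morlet``
# helper defined earlier in this module; the random signal below is purely an
# illustrative assumption.
#
#     import numpy as np
#     from mne.time_frequency import morlet   # assumed public import path
#     sfreq, freqs = 1000., np.array([10., 20., 40.])
#     Ws = morlet(sfreq, freqs, n_cycles=7.)          # list of complex wavelets
#     X = np.random.RandomState(0).randn(2, 1000)     # (n_signals, n_times)
#     tfr = cwt(X, Ws, use_fft=True, mode='same', decim=4)
#     # tfr.shape == (2, 3, 250); complex-valued coefficients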
def _tfr_aux(method, inst, freqs, decim, return_itc, picks, average,
output=None, **tfr_params):
    """Help reduce redundancy between tfr_morlet and tfr_multitaper."""
    from ..epochs import BaseEpochs
decim = _check_decim(decim)
data = _get_data(inst, return_itc)
info = inst.info.copy() # make a copy as sfreq can be altered
info, data = _prepare_picks(info, data, picks, axis=1)
del picks
if average:
if output == 'complex':
raise ValueError('output must be "power" if average=True')
if return_itc:
output = 'avg_power_itc'
else:
output = 'avg_power'
else:
output = 'power' if output is None else output
if return_itc:
raise ValueError('Inter-trial coherence is not supported'
' with average=False')
out = _compute_tfr(data, freqs, info['sfreq'], method=method,
output=output, decim=decim, **tfr_params)
times = inst.times[decim].copy()
info['sfreq'] /= decim.step
if average:
if return_itc:
power, itc = out.real, out.imag
else:
power = out
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave,
method='%s-power' % method)
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
method='%s-itc' % method))
else:
power = out
if isinstance(inst, BaseEpochs):
meta = deepcopy(inst._metadata)
evs = deepcopy(inst.events)
ev_id = deepcopy(inst.event_id)
else:
# if the input is of class Evoked
meta = evs = ev_id = None
out = EpochsTFR(info, power, times, freqs, method='%s-power' % method,
events=evs, event_id=ev_id, metadata=meta)
return out
@verbose
def tfr_morlet(inst, freqs, n_cycles, use_fft=False, return_itc=True, decim=1,
n_jobs=1, picks=None, zero_mean=True, average=True,
output='power', verbose=None):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets.
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
use_fft : bool, default False
        Use the FFT for convolutions or not.
return_itc : bool, default True
Return inter-trial coherence (ITC) as well as averaged power.
Must be ``False`` for evoked data.
decim : int | slice, default 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note:: Decimation may create aliasing artifacts.
%(n_jobs)s
picks : array-like of int | None, default None
The indices of the channels to decompose. If None, all available
good data channels are decomposed.
zero_mean : bool, default True
Make sure the wavelet has a mean of zero.
.. versionadded:: 0.13.0
%(tfr_average)s
output : str
Can be "power" (default) or "complex". If "complex", then
average must be False.
.. versionadded:: 0.15.0
%(verbose)s
Returns
-------
power : AverageTFR | EpochsTFR
The averaged or single-trial power.
itc : AverageTFR | EpochsTFR
The inter-trial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
mne.time_frequency.tfr_array_morlet
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_array_stockwell
"""
tfr_params = dict(n_cycles=n_cycles, n_jobs=n_jobs, use_fft=use_fft,
zero_mean=zero_mean, output=output)
return _tfr_aux('morlet', inst, freqs, decim, return_itc, picks,
average, **tfr_params)
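# A hedged usage sketch for ``tfr_morlet`` (comments only); ``epochs`` is
# assumed to be an existing mne.Epochs instance, and the frequency grid and
# cycle counts are illustration values, not recommendations.
#
#     import numpy as np
#     freqs = np.arange(6., 30., 2.)
#     n_cycles = freqs / 2.                      # one value per frequency
#     power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
#                             return_itc=True, decim=3, average=True)
#     # power and itc are AverageTFR instances; with average=False and
#     # return_itc=False a single EpochsTFR would be returned instead.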
@verbose
def tfr_array_morlet(epoch_data, sfreq, freqs, n_cycles=7.0,
zero_mean=False, use_fft=True, decim=1, output='complex',
n_jobs=1, verbose=None):
"""Compute time-frequency transform using Morlet wavelets.
Convolves epoch data with selected Morlet wavelets.
Parameters
----------
epoch_data : array of shape (n_epochs, n_channels, n_times)
The epochs.
sfreq : float | int
Sampling frequency of the data.
freqs : array-like of float, shape (n_freqs,)
The frequencies.
n_cycles : float | array of float, default 7.0
Number of cycles in the Morlet wavelet. Fixed number or one per
frequency.
    zero_mean : bool, default False
        If True, make sure the wavelets have a mean of zero.
    use_fft : bool, default True
        Use the FFT for convolutions or not.
    decim : int | slice, default 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note::
Decimation may create aliasing artifacts, yet decimation
is done after the convolutions.
output : str, default 'complex'
* 'complex' : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
* 'avg_power' : average of single trial power.
* 'itc' : inter-trial coherence.
* 'avg_power_itc' : average of single trial power and inter-trial
coherence across trials.
%(n_jobs)s
The number of epochs to process at the same time. The parallelization
is implemented across channels. Default 1.
%(verbose)s
Returns
-------
out : array
Time frequency transform of epoch_data. If output is in ['complex',
'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
n_times), else it is (n_chans, n_freqs, n_times). If output is
'avg_power_itc', the real values code for 'avg_power' and the
imaginary values code for the 'itc': out = avg_power + i * itc.
See Also
--------
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_array_stockwell
Notes
-----
.. versionadded:: 0.14.0
"""
return _compute_tfr(epoch_data=epoch_data, freqs=freqs,
sfreq=sfreq, method='morlet', n_cycles=n_cycles,
zero_mean=zero_mean, time_bandwidth=None,
use_fft=use_fft, decim=decim, output=output,
n_jobs=n_jobs, verbose=verbose)
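# A hedged usage sketch for ``tfr_array_morlet`` on a plain NumPy array
# (comments only); the synthetic data below are an assumption made purely for
# illustration.
#
#     import numpy as np
#     sfreq = 1000.
#     epoch_data = np.random.randn(5, 3, 1000)   # (n_epochs, n_chans, n_times)
#     freqs = np.arange(5., 30., 3.)
#     power = tfr_array_morlet(epoch_data, sfreq=sfreq, freqs=freqs,
#                              n_cycles=freqs / 2., output='power')
#     # power.shape == (5, 3, len(freqs), 1000)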
@verbose
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
use_fft=True, return_itc=True, decim=1,
n_jobs=1, picks=None, average=True, verbose=None):
"""Compute Time-Frequency Representation (TFR) using DPSS tapers.
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
The time-window length is thus T = n_cycles / freq.
time_bandwidth : float, (optional), default 4.0 (n_tapers=3)
Time x (Full) Bandwidth product. Should be >= 2.0.
Choose this along with n_cycles to get desired frequency resolution.
The number of good tapers (least leakage from far away frequencies)
is chosen automatically based on this to floor(time_bandwidth - 1).
E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
use_fft : bool, default True
        Use the FFT for convolutions or not.
return_itc : bool, default True
Return inter-trial coherence (ITC) as well as averaged (or
single-trial) power.
decim : int | slice, default 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note:: Decimation may create aliasing artifacts.
%(n_jobs)s
%(picks_good_data)s
%(tfr_average)s
%(verbose)s
Returns
-------
power : AverageTFR | EpochsTFR
The averaged or single-trial power.
itc : AverageTFR | EpochsTFR
The inter-trial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_array_stockwell
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_array_morlet
Notes
-----
.. versionadded:: 0.9.0
"""
tfr_params = dict(n_cycles=n_cycles, n_jobs=n_jobs, use_fft=use_fft,
zero_mean=True, time_bandwidth=time_bandwidth)
return _tfr_aux('multitaper', inst, freqs, decim, return_itc, picks,
average, **tfr_params)
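# A hedged sketch of the ``time_bandwidth`` arithmetic spelled out in the
# docstring above, plus an illustrative call; ``epochs`` is assumed to be an
# existing mne.Epochs instance.
#
#     import numpy as np
#     freq, n_cycles, time_bandwidth = 20., 10., 4.
#     T = n_cycles / freq                           # 0.5 s analysis window
#     freq_smoothing = time_bandwidth / T           # 8 Hz of smoothing
#     n_tapers = int(np.floor(time_bandwidth - 1))  # 3 good tapers
#
#     power, itc = tfr_multitaper(epochs, freqs=np.arange(5., 50., 2.),
#                                 n_cycles=10., time_bandwidth=4.)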
# TFR(s) class
class _BaseTFR(ContainsMixin, UpdateChannelsMixin, SizeMixin):
"""Base TFR class."""
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self._data = data
@property
def ch_names(self):
"""Channel names."""
return self.info['ch_names']
@fill_doc
def crop(self, tmin=None, tmax=None, fmin=None, fmax=None,
include_tmax=True):
"""Crop data to a given time interval in place.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
fmin : float | None
Lowest frequency of selection in Hz.
.. versionadded:: 0.18.0
fmax : float | None
Highest frequency of selection in Hz.
.. versionadded:: 0.18.0
%(include_tmax)s
Returns
-------
inst : instance of AverageTFR
The modified instance.
"""
if tmin is not None or tmax is not None:
time_mask = _time_mask(
self.times, tmin, tmax, sfreq=self.info['sfreq'],
include_tmax=include_tmax)
else:
time_mask = slice(None)
if fmin is not None or fmax is not None:
freq_mask = _freq_mask(self.freqs, sfreq=self.info['sfreq'],
fmin=fmin, fmax=fmax)
else:
freq_mask = slice(None)
self.times = self.times[time_mask]
self.freqs = self.freqs[freq_mask]
# Deal with broadcasting (boolean arrays do not broadcast, but indices
# do, so we need to convert freq_mask to make use of broadcasting)
if isinstance(time_mask, np.ndarray) and \
isinstance(freq_mask, np.ndarray):
freq_mask = np.where(freq_mask)[0][:, np.newaxis]
self.data = self.data[..., freq_mask, time_mask]
return self
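# A hedged usage sketch for ``crop`` (comments only); ``tfr`` is assumed to be
# an AverageTFR or EpochsTFR instance. The operation is in place, but the
# instance is also returned, so calls can be chained.
#
#     tfr.crop(tmin=0., tmax=0.5)            # keep 0-0.5 s
#     tfr.crop(fmin=8., fmax=12.)            # keep only the 8-12 Hz band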
def copy(self):
"""Return a copy of the instance.
Returns
-------
copy : instance of EpochsTFR | instance of AverageTFR
A copy of the instance.
"""
return deepcopy(self)
@verbose
def apply_baseline(self, baseline, mode='mean', verbose=None):
"""Baseline correct the data.
Parameters
----------
baseline : array-like, shape (2,)
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
Perform baseline correction by
- subtracting the mean of baseline values ('mean')
- dividing by the mean of baseline values ('ratio')
- dividing by the mean of baseline values and taking the log
('logratio')
- subtracting the mean of baseline values followed by dividing by
the mean of baseline values ('percent')
- subtracting the mean of baseline values and dividing by the
standard deviation of baseline values ('zscore')
- dividing by the mean of baseline values, taking the log, and
dividing by the standard deviation of log baseline values
('zlogratio')
%(verbose_meth)s
Returns
-------
inst : instance of AverageTFR
The modified instance.
""" # noqa: E501
rescale(self.data, self.times, baseline, mode, copy=False)
return self
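# A hedged usage sketch for ``apply_baseline`` (comments only); ``power`` is
# assumed to be an AverageTFR computed elsewhere, with a prestimulus period
# covering -0.5 to 0 s.
#
#     power.apply_baseline(baseline=(-0.5, 0.), mode='logratio')
#     # per the docstring, 'logratio' divides by the baseline mean and then
#     # takes the log of the result.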
def save(self, fname, overwrite=False):
"""Save TFR object to hdf5 file.
Parameters
----------
fname : str
The file name, which should end with ``-tfr.h5``.
overwrite : bool
If True, overwrite file (if it exists). Defaults to False.
See Also
--------
read_tfrs, write_tfrs
"""
write_tfrs(fname, self, overwrite=overwrite)
@fill_doc
class AverageTFR(_BaseTFR):
"""Container for Time-Frequency data.
Can for example store induced power at sensor level or inter-trial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None, default None
Comment on the data, e.g., the experimental condition.
method : str | None, default None
Comment on the method used to compute the data, e.g., morlet wavelet.
%(verbose)s
Attributes
----------
info : instance of Info
Measurement info.
ch_names : list
The names of the channels.
nave : int
Number of averaged epochs.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data array.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
comment : str
Comment on dataset. Can be the condition.
method : str | None, default None
Comment on the method used to compute the data, e.g., morlet wavelet.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None): # noqa: D102
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = np.array(times, dtype=float)
self.freqs = np.array(freqs, dtype=float)
self.nave = nave
self.comment = comment
self.method = method
self.preload = True
@verbose
def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True, title=None,
axes=None, layout=None, yscale='auto', mask=None,
mask_style=None, mask_cmap="Greys", mask_alpha=0.1, combine=None,
exclude=[], verbose=None):
"""Plot TFRs as a two-dimensional image(s).
Parameters
----------
%(picks_good_data)s
baseline : None (default) or tuple, shape (2,)
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
Perform baseline correction by
- subtracting the mean of baseline values ('mean')
- dividing by the mean of baseline values ('ratio')
- dividing by the mean of baseline values and taking the log
('logratio')
- subtracting the mean of baseline values followed by dividing by
the mean of baseline values ('percent')
- subtracting the mean of baseline values and dividing by the
standard deviation of baseline values ('zscore')
- dividing by the mean of baseline values, taking the log, and
dividing by the standard deviation of log baseline values
('zlogratio')
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
        vmin : float | None
            The minimum value of the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value of the color scale. If vmax is None, the data
            maximum value is used.
cmap : matplotlib colormap | 'interactive' | (colormap, bool)
The colormap to use. If tuple, the first value indicates the
colormap to use and the second value is a boolean defining
interactivity. In interactive mode the colors are adjustable by
clicking and dragging the colorbar with left and right mouse
button. Left mouse button moves the scale up and down and right
mouse button adjusts the range. Hitting space bar resets the range.
Up and down arrows can be used to change the colormap. If
'interactive', translates to ('RdBu_r', True). Defaults to
'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of images.
dB : bool
If True, 10*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | 'auto' | None
String for title. Defaults to None (blank/no title). If 'auto',
automatically create a title that lists up to 6 of the channels
used in the figure.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
yscale : 'auto' (default) | 'linear' | 'log'
The scale of y (frequency) axis. 'linear' gives linear y axis,
'log' leads to log-spaced y axis and 'auto' detects if frequencies
are log-spaced and only then sets the y axis to 'log'.
.. versionadded:: 0.14.0
mask : ndarray | None
An array of booleans of the same shape as the data. Entries of the
data that correspond to False in the mask are plotted
transparently. Useful for, e.g., masking for statistical
significance.
.. versionadded:: 0.16.0
mask_style : None | 'both' | 'contour' | 'mask'
If ``mask`` is not None: if ``'contour'``, a contour line is drawn
around the masked areas (``True`` in ``mask``). If ``'mask'``,
entries not ``True`` in ``mask`` are shown transparently. If
``'both'``, both a contour and transparency are used.
If ``None``, defaults to ``'both'`` if ``mask`` is not None, and is
ignored otherwise.
.. versionadded:: 0.17
mask_cmap : matplotlib colormap | (colormap, bool) | 'interactive'
The colormap chosen for masked parts of the image (see below), if
``mask`` is not ``None``. If None, ``cmap`` is reused. Defaults to
``'Greys'``. Not interactive. Otherwise, as ``cmap``.
.. versionadded:: 0.17
mask_alpha : float
A float between 0 and 1. If ``mask`` is not None, this sets the
alpha level (degree of transparency) for the masked-out segments.
I.e., if 0, masked-out segments are not visible at all.
Defaults to 0.1.
.. versionadded:: 0.16.0
combine : 'mean' | 'rms' | None
Type of aggregation to perform across selected channels. If
None, plot one figure per selected channel.
exclude : list of str | 'bads'
            Channel names to exclude from being shown. If 'bads', the
bad channels are excluded. Defaults to an empty list.
%(verbose_meth)s
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
""" # noqa: E501
return self._plot(picks=picks, baseline=baseline, mode=mode,
tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
colorbar=colorbar, show=show, title=title,
axes=axes, layout=layout, yscale=yscale, mask=mask,
mask_style=mask_style, mask_cmap=mask_cmap,
mask_alpha=mask_alpha, combine=combine,
exclude=exclude, verbose=verbose)
@verbose
def _plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True, title=None,
axes=None, layout=None, yscale='auto', mask=None,
mask_style=None, mask_cmap="Greys", mask_alpha=.25,
combine=None, exclude=None, copy=True,
source_plot_joint=False, topomap_args=dict(), ch_type=None,
verbose=None):
"""Plot TFRs as a two-dimensional image(s).
See self.plot() for parameters description.
"""
import matplotlib.pyplot as plt
from ..viz.topo import _imshow_tfr
# channel selection
# simply create a new tfr object(s) with the desired channel selection
tfr = _preproc_tfr_instance(
self, picks, tmin, tmax, fmin, fmax, vmin, vmax, dB, mode,
baseline, exclude, copy)
del picks
data = tfr.data
n_picks = len(tfr.ch_names) if combine is None else 1
if combine == 'mean':
data = data.mean(axis=0, keepdims=True)
elif combine == 'rms':
data = np.sqrt((data ** 2).mean(axis=0, keepdims=True))
elif combine is not None:
raise ValueError('combine must be None, mean or rms.')
if isinstance(axes, list) or isinstance(axes, np.ndarray):
if len(axes) != n_picks:
raise RuntimeError('There must be an axes for each picked '
'channel.')
tmin, tmax = tfr.times[[0, -1]]
if vmax is None:
vmax = np.abs(data).max()
if vmin is None:
vmin = -np.abs(data).max()
if isinstance(axes, plt.Axes):
axes = [axes]
cmap = _setup_cmap(cmap)
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
onselect_callback = partial(
tfr._onselect, cmap=cmap, source_plot_joint=source_plot_joint,
topomap_args={k: v for k, v in topomap_args.items()
if k not in {"vmin", "vmax", "cmap", "axes"}})
_imshow_tfr(
ax, 0, tmin, tmax, vmin, vmax, onselect_callback, ylim=None,
tfr=data[idx: idx + 1], freq=tfr.freqs, x_label='Time (s)',
y_label='Frequency (Hz)', colorbar=colorbar, cmap=cmap,
yscale=yscale, mask=mask, mask_style=mask_style,
mask_cmap=mask_cmap, mask_alpha=mask_alpha)
if title is None:
if combine is None or len(tfr.info['ch_names']) == 1:
title = tfr.info['ch_names'][0]
else:
title = _set_title_multiple_electrodes(
title, combine, tfr.info["ch_names"], all=True,
ch_type=ch_type)
if title:
fig.suptitle(title)
plt_show(show)
# XXX This is inside the loop, guaranteeing a single iter!
# Also there is no None-contingent behavior here so the docstring
# was wrong (saying it would be collapsed)
return fig
@verbose
def plot_joint(self, timefreqs=None, picks=None, baseline=None,
mode='mean', tmin=None, tmax=None, fmin=None, fmax=None,
vmin=None, vmax=None, cmap='RdBu_r', dB=False,
colorbar=True, show=True, title=None,
yscale='auto', combine='mean', exclude=[],
topomap_args=None, image_args=None, verbose=None):
"""Plot TFRs as a two-dimensional image with topomaps.
Parameters
----------
timefreqs : None | list of tuple | dict of tuple
The time-frequency point(s) for which topomaps will be plotted.
See Notes.
%(picks_good_data)s
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None, the beginning of the data is used.
If b is None, then b is set to the end of the interval.
If baseline is equal to (None, None), the entire time
interval is used.
mode : None | str
If str, must be one of 'ratio', 'zscore', 'mean', 'percent',
'logratio' and 'zlogratio'.
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)),
mean simply subtracts the mean power, percent is the same as
applying ratio then mean, logratio is the same as mean but then
rendered in log-scale, zlogratio is the same as zscore but data
is rendered in log-scale first.
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The minimum value of the color scale for the image (for
topomaps, see ``topomap_args``). If vmin is None, the data
absolute minimum value is used.
vmax : float | None
The maximum value of the color scale for the image (for
topomaps, see ``topomap_args``). If vmax is None, the data
absolute maximum value is used.
cmap : matplotlib colormap
The colormap to use.
dB : bool
If True, 10*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot (relating to the
topomaps). For user defined axes, the colorbar cannot be drawn.
Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
yscale : 'auto' (default) | 'linear' | 'log'
The scale of y (frequency) axis. 'linear' gives linear y axis,
'log' leads to log-spaced y axis and 'auto' detects if frequencies
are log-spaced and only then sets the y axis to 'log'.
combine : 'mean' | 'rms'
Type of aggregation to perform across selected channels.
exclude : list of str | 'bads'
            Channel names to exclude from being shown. If 'bads', the
bad channels are excluded. Defaults to an empty list, i.e., ``[]``.
topomap_args : None | dict
A dict of ``kwargs`` that are forwarded to
:func:`mne.viz.plot_topomap` to style the topomaps. ``axes`` and
``show`` are ignored. If ``times`` is not in this dict, automatic
peak detection is used. Beyond that, if ``None``, no customizable
arguments will be passed.
Defaults to ``None``.
image_args : None | dict
A dict of ``kwargs`` that are forwarded to :meth:`AverageTFR.plot`
to style the image. ``axes`` and ``show`` are ignored. Beyond that,
if ``None``, no customizable arguments will be passed.
Defaults to ``None``.
%(verbose_meth)s
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
Notes
-----
``timefreqs`` has three different modes: tuples, dicts, and auto.
For (list of) tuple(s) mode, each tuple defines a pair
(time, frequency) in s and Hz on the TFR plot. For example, to
look at 10 Hz activity 1 second into the epoch and 3 Hz activity
300 msec into the epoch, ::
timefreqs=((1, 10), (.3, 3))
If provided as a dictionary, (time, frequency) tuples are keys and
(time_window, frequency_window) tuples are the values - indicating the
width of the windows (centered on the time and frequency indicated by
the key) to be averaged over. For example, ::
timefreqs={(1, 10): (0.1, 2)}
would translate into a window that spans 0.95 to 1.05 seconds, as
well as 9 to 11 Hz. If None, a single topomap will be plotted at the
absolute peak across the time-frequency representation.
.. versionadded:: 0.16.0
""" # noqa: E501
from ..viz.topomap import (_set_contour_locator, plot_topomap,
_get_pos_outlines, _find_topomap_coords)
import matplotlib.pyplot as plt
#####################################
# Handle channels (picks and types) #
#####################################
# it would be nicer to let this happen in self._plot,
# but we need it here to do the loop over the remaining channel
# types in case a user supplies `picks` that pre-select only one
# channel type.
# Nonetheless, it should be refactored for code reuse.
copy = any(var is not None for var in (exclude, picks, baseline))
tfr = _pick_inst(self, picks, exclude, copy=copy)
del picks
ch_types = _get_channel_types(tfr.info, unique=True)
# if multiple sensor types: one plot per channel type, recursive call
if len(ch_types) > 1:
logger.info("Multiple channel types selected, returning one "
"figure per type.")
figs = list()
for this_type in ch_types: # pick corresponding channel type
type_picks = [idx for idx in range(tfr.info['nchan'])
if channel_type(tfr.info, idx) == this_type]
tf_ = _pick_inst(tfr, type_picks, None, copy=True)
if len(_get_channel_types(tf_.info, unique=True)) > 1:
raise RuntimeError(
'Possibly infinite loop due to channel selection '
'problem. This should never happen! Please check '
'your channel types.')
figs.append(
tf_.plot_joint(
timefreqs=timefreqs, picks=None, baseline=baseline,
mode=mode, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
colorbar=colorbar, show=False, title=title,
yscale=yscale, combine=combine,
exclude=None, topomap_args=topomap_args,
verbose=verbose))
return figs
else:
ch_type = ch_types.pop()
# Handle timefreqs
timefreqs = _get_timefreqs(tfr, timefreqs)
n_timefreqs = len(timefreqs)
if topomap_args is None:
topomap_args = dict()
topomap_args_pass = {k: v for k, v in topomap_args.items() if
k not in ('axes', 'show', 'colorbar')}
topomap_args_pass['outlines'] = topomap_args.get('outlines', 'skirt')
topomap_args_pass["contours"] = topomap_args.get('contours', 6)
##############
# Image plot #
##############
fig, tf_ax, map_ax, cbar_ax = _prepare_joint_axes(n_timefreqs)
cmap = _setup_cmap(cmap)
# image plot
# we also use this to baseline and truncate (times and freqs)
# (a copy of) the instance
if image_args is None:
image_args = dict()
fig = tfr._plot(
picks=None, baseline=baseline, mode=mode, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
colorbar=False, show=False, title=title, axes=tf_ax,
yscale=yscale, combine=combine, exclude=None, copy=False,
source_plot_joint=True, topomap_args=topomap_args_pass,
ch_type=ch_type, **image_args)
# set and check time and freq limits ...
# can only do this after the tfr plot because it may change these
# parameters
tmax, tmin = tfr.times.max(), tfr.times.min()
fmax, fmin = tfr.freqs.max(), tfr.freqs.min()
for time, freq in timefreqs.keys():
if not (tmin <= time <= tmax):
error_value = "time point (" + str(time) + " s)"
elif not (fmin <= freq <= fmax):
error_value = "frequency (" + str(freq) + " Hz)"
else:
continue
            raise ValueError("Requested " + error_value + " exceeds the range "
                             "of the data. Choose different `timefreqs`.")
############
# Topomaps #
############
titles, all_data, all_pos, vlims = [], [], [], []
# the structure here is a bit complicated to allow aggregating vlims
# over all topomaps. First, one loop over all timefreqs to collect
# vlims. Then, find the max vlims and in a second loop over timefreqs,
# do the actual plotting.
timefreqs_array = np.array([np.array(keys) for keys in timefreqs])
order = timefreqs_array[:, 0].argsort() # sort by time
for ii, (time, freq) in enumerate(timefreqs_array[order]):
avg = timefreqs[(time, freq)]
# set up symmetric windows
time_half_range, freq_half_range = avg / 2.
if time_half_range == 0:
time = tfr.times[np.argmin(np.abs(tfr.times - time))]
if freq_half_range == 0:
freq = tfr.freqs[np.argmin(np.abs(tfr.freqs - freq))]
if (time_half_range == 0) and (freq_half_range == 0):
sub_map_title = '(%.2f s,\n%.1f Hz)' % (time, freq)
else:
sub_map_title = \
'(%.1f \u00B1 %.1f s,\n%.1f \u00B1 %.1f Hz)' % \
(time, time_half_range, freq, freq_half_range)
tmin = time - time_half_range
tmax = time + time_half_range
fmin = freq - freq_half_range
fmax = freq + freq_half_range
data = tfr.data
# merging grads here before rescaling makes ERDs visible
sphere = topomap_args.get('sphere')
if ch_type == 'grad':
picks = _pair_grad_sensors(tfr.info, topomap_coords=False)
pos = _find_topomap_coords(
tfr.info, picks=picks[::2], sphere=sphere)
method = combine or 'rms'
data, _ = _merge_ch_data(data[picks], ch_type, [],
method=method)
del picks, method
else:
pos, _ = _get_pos_outlines(tfr.info, None, sphere)
del sphere
all_pos.append(pos)
data, times, freqs, _, _ = _preproc_tfr(
data, tfr.times, tfr.freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, None, tfr.info['sfreq'])
vlims.append(np.abs(data).max())
titles.append(sub_map_title)
all_data.append(data)
new_t = tfr.times[np.abs(tfr.times - np.median([times])).argmin()]
new_f = tfr.freqs[np.abs(tfr.freqs - np.median([freqs])).argmin()]
timefreqs_array[ii] = (new_t, new_f)
# passing args to the topomap calls
max_lim = max(vlims)
topomap_args_pass["vmin"] = vmin = topomap_args.get('vmin', -max_lim)
topomap_args_pass["vmax"] = vmax = topomap_args.get('vmax', max_lim)
locator, contours = _set_contour_locator(
vmin, vmax, topomap_args_pass["contours"])
topomap_args_pass['contours'] = contours
for ax, title, data, pos in zip(map_ax, titles, all_data, all_pos):
ax.set_title(title)
plot_topomap(data.mean(axis=(-1, -2)), pos,
cmap=cmap[0], axes=ax, show=False,
**topomap_args_pass)
#############
# Finish up #
#############
if colorbar:
from matplotlib import ticker
cbar = plt.colorbar(ax.images[0], cax=cbar_ax)
if locator is None:
locator = ticker.MaxNLocator(nbins=5)
cbar.locator = locator
cbar.update_ticks()
plt.subplots_adjust(left=.12, right=.925, bottom=.14,
top=1. if title is not None else 1.2)
# draw the connection lines between time series and topoplots
lines = [_connection_line(time_, fig, tf_ax, map_ax_, y=freq_,
y_source_transform="transData")
for (time_, freq_), map_ax_ in zip(timefreqs_array, map_ax)]
fig.lines.extend(lines)
plt_show(show)
return fig
def _onselect(self, eclick, erelease, baseline=None, mode=None,
cmap=None, source_plot_joint=False, topomap_args=None):
"""Handle rubber band selector in channel tfr."""
from ..viz.topomap import plot_tfr_topomap, plot_topomap, _add_colorbar
if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
return
tmin = round(min(eclick.xdata, erelease.xdata), 5) # s
tmax = round(max(eclick.xdata, erelease.xdata), 5)
fmin = round(min(eclick.ydata, erelease.ydata), 5) # Hz
fmax = round(max(eclick.ydata, erelease.ydata), 5)
tmin = min(self.times, key=lambda x: abs(x - tmin)) # find closest
tmax = min(self.times, key=lambda x: abs(x - tmax))
fmin = min(self.freqs, key=lambda x: abs(x - fmin))
fmax = min(self.freqs, key=lambda x: abs(x - fmax))
if tmin == tmax or fmin == fmax:
logger.info('The selected area is too small. '
'Select a larger time-frequency window.')
return
types = list()
if 'eeg' in self:
types.append('eeg')
if 'mag' in self:
types.append('mag')
if 'grad' in self:
if len(_pair_grad_sensors(self.info, topomap_coords=False,
raise_error=False)) >= 2:
types.append('grad')
elif len(types) == 0:
return # Don't draw a figure for nothing.
fig = figure_nobar()
fig.suptitle('{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz'.format(
tmin, tmax, fmin, fmax), y=0.04)
if source_plot_joint:
ax = fig.add_subplot(111)
data = _preproc_tfr(
self.data, self.times, self.freqs, tmin, tmax, fmin, fmax,
None, None, None, None, None, self.info['sfreq'])[0]
data = data.mean(-1).mean(-1)
vmax = np.abs(data).max()
im, _ = plot_topomap(data, self.info, vmin=-vmax, vmax=vmax,
cmap=cmap[0], axes=ax, show=False,
**topomap_args)
_add_colorbar(ax, im, cmap, title="AU", pad=.1)
fig.show()
else:
for idx, ch_type in enumerate(types):
ax = fig.add_subplot(1, len(types), idx + 1)
plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax,
baseline=baseline, mode=mode, cmap=None,
title=ch_type, vmin=None, vmax=None, axes=ax)
@fill_doc
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True,
border='none', fig_facecolor='k', fig_background=None,
font_color='w', yscale='auto'):
"""Plot TFRs in a topography with images.
Parameters
----------
%(picks_good_data)s
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
Perform baseline correction by
- subtracting the mean of baseline values ('mean')
- dividing by the mean of baseline values ('ratio')
- dividing by the mean of baseline values and taking the log
('logratio')
- subtracting the mean of baseline values followed by dividing by
the mean of baseline values ('percent')
- subtracting the mean of baseline values and dividing by the
standard deviation of baseline values ('zscore')
- dividing by the mean of baseline values, taking the log, and
dividing by the standard deviation of log baseline values
('zlogratio')
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The minimum value of the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
The maximum value of the color scale. If vmax is None, the data
maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 10*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
Matplotlib borders style to be used for each sensor plot.
fig_facecolor : color
The figure face color. Defaults to black.
fig_background : None | array
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
font_color : color
The color of tick labels in the colorbar. Defaults to white.
yscale : 'auto' (default) | 'linear' | 'log'
The scale of y (frequency) axis. 'linear' gives linear y axis,
'log' leads to log-spaced y axis and 'auto' detects if frequencies
are log-spaced and only then sets the y axis to 'log'.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
""" # noqa: E501
from ..viz.topo import _imshow_tfr, _plot_topo, _imshow_tfr_unified
from ..viz import add_background_image
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
info, data = _prepare_picks(info, data, picks, axis=0)
del picks
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB, info['sfreq'])
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode)
click_fun = partial(_imshow_tfr, tfr=data, freq=freqs, yscale=yscale,
cmap=(cmap, True), onselect=onselect_callback)
imshow = partial(_imshow_tfr_unified, tfr=data, freq=freqs, cmap=cmap,
onselect=onselect_callback)
fig = _plot_topo(info=info, times=times, show_func=imshow,
click_func=click_fun, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border=border,
x_label='Time (s)', y_label='Frequency (Hz)',
fig_facecolor=fig_facecolor, font_color=font_color,
unified=True, img=True)
add_background_image(fig, fig_background)
plt_show(show)
return fig
@fill_doc
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
vmin=None, vmax=None, cmap=None, sensors=True,
colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head',
contours=6, sphere=None):
"""Plot topographic maps of time-frequency intervals of TFR data.
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
Perform baseline correction by
- subtracting the mean of baseline values ('mean')
- dividing by the mean of baseline values ('ratio')
- dividing by the mean of baseline values and taking the log
('logratio')
- subtracting the mean of baseline values followed by dividing by
the mean of baseline values ('percent')
- subtracting the mean of baseline values and dividing by the
standard deviation of baseline values ('zscore')
- dividing by the mean of baseline values, taking the log, and
dividing by the standard deviation of log baseline values
('zlogratio')
vmin : float | callable | None
The value specifying the lower bound of the color range. If None,
and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output
equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None,
the maximum value is used. If callable, the output equals
vmax(data). Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None (default), 'Reds' is used
for all positive data, otherwise defaults to 'RdBu_r'. If
'interactive', translates to (None, True).
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If ``mask`` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be
drawn. When an integer, matplotlib ticker locator is used to find
suitable values for the contour thresholds (may sometimes be
inaccurate, use array for accuracy). If an array, the values
represent the levels for the contours. If colorbar=True, the ticks
in colorbar correspond to the contour levels. Defaults to 6.
%(topomap_sphere_auto)s
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
""" # noqa: E501
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size,
cbar_fmt=cbar_fmt, show_names=show_names,
title=title, axes=axes, show=show,
outlines=outlines,
contours=contours, sphere=sphere)
def _check_compat(self, tfr):
"""Check that self and tfr have the same time-frequency ranges."""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr): # noqa: D105
"""Add instances."""
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
def __iadd__(self, tfr): # noqa: D105
self._check_compat(tfr)
self.data += tfr.data
return self
def __sub__(self, tfr): # noqa: D105
"""Subtract instances."""
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
def __isub__(self, tfr): # noqa: D105
self._check_compat(tfr)
self.data -= tfr.data
return self
def __truediv__(self, a): # noqa: D105
"""Divide instances."""
out = self.copy()
out /= a
return out
def __itruediv__(self, a): # noqa: D105
self.data /= a
return self
def __mul__(self, a):
"""Multiply source instances."""
out = self.copy()
out *= a
return out
def __imul__(self, a): # noqa: D105
self.data *= a
return self
def __repr__(self): # noqa: D105
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
s += ', ~%s' % (sizeof_fmt(self._size),)
return "<AverageTFR | %s>" % s
@fill_doc
class EpochsTFR(_BaseTFR, GetEpochsMixin):
"""Container for Time-Frequency data on epochs.
Can for example store induced power at sensor level.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_epochs, n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
comment : str | None, default None
Comment on the data, e.g., the experimental condition.
method : str | None, default None
Comment on the method used to compute the data, e.g., morlet wavelet.
events : ndarray, shape (n_events, 3) | None
The events as stored in the Epochs class. If None (default), all event
values are set to 1 and event time-samples are set to range(n_epochs).
event_id : dict | None
        Example: dict(auditory=1, visual=3). The keys can be used to access
associated events. If None, all events will be used and a dict is
created with string integer names corresponding to the event id
integers.
metadata : instance of pandas.DataFrame | None
A :class:`pandas.DataFrame` containing pertinent information for each
trial. See :class:`mne.Epochs` for further details.
%(verbose)s
Attributes
----------
info : instance of Info
Measurement info.
ch_names : list
The names of the channels.
data : ndarray, shape (n_epochs, n_channels, n_freqs, n_times)
The data array.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
    comment : str
Comment on dataset. Can be the condition.
method : str | None, default None
Comment on the method used to compute the data, e.g., morlet wavelet.
    events : ndarray, shape (n_events, 3) | None
        The events array, as stored in the Epochs class.
    event_id : dict | None
        Mapping of condition names to event ids.
metadata : pandas.DataFrame, shape (n_events, n_cols) | None
DataFrame containing pertinent information for each trial
Notes
-----
.. versionadded:: 0.13.0
"""
@verbose
def __init__(self, info, data, times, freqs, comment=None, method=None,
events=None, event_id=None, metadata=None, verbose=None):
# noqa: D102
self.info = info
if data.ndim != 4:
raise ValueError('data should be 4d. Got %d.' % data.ndim)
n_epochs, n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
if events is None:
n_epochs = len(data)
events = _gen_events(n_epochs)
event_id = _check_event_id(event_id, events)
self.data = data
self.times = np.array(times, dtype=float)
self.freqs = np.array(freqs, dtype=float)
self.events = events
self.event_id = event_id
self.comment = comment
self.method = method
self.preload = True
self.metadata = metadata
def __repr__(self): # noqa: D105
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", epochs : %d" % self.data.shape[0]
s += ', channels : %d' % self.data.shape[1]
s += ', ~%s' % (sizeof_fmt(self._size),)
return "<EpochsTFR | %s>" % s
def __abs__(self):
"""Take the absolute value."""
epochs = self.copy()
epochs.data = np.abs(self.data)
return epochs
def average(self):
"""Average the data across epochs.
Returns
-------
ave : instance of AverageTFR
The averaged data.
"""
data = np.mean(self.data, axis=0)
return AverageTFR(info=self.info.copy(), data=data,
times=self.times.copy(), freqs=self.freqs.copy(),
nave=self.data.shape[0], method=self.method,
comment=self.comment)
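# A hedged usage sketch for ``EpochsTFR.average`` (comments only); ``epo_tfr``
# is assumed to be an EpochsTFR, e.g. obtained with average=False.
#
#     ave = epo_tfr.average()
#     # ave is an AverageTFR whose nave equals the number of epochs and whose
#     # data array has shape (n_channels, n_freqs, n_times)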
def combine_tfr(all_tfr, weights='nave'):
"""Merge AverageTFR data by weighted addition.
Create a new AverageTFR instance, using a combination of the supplied
instances as its data. By default, the mean (weighted by trials) is used.
Subtraction can be performed by passing negative weights (e.g., [1, -1]).
Data must have the same channels and the same time instants.
Parameters
----------
all_tfr : list of AverageTFR
The tfr datasets.
weights : list of float | str
The weights to apply to the data of each AverageTFR instance.
Can also be ``'nave'`` to weight according to tfr.nave,
or ``'equal'`` to use equal weighting (each weighted as ``1/N``).
Returns
-------
tfr : AverageTFR
The new TFR data.
Notes
-----
.. versionadded:: 0.11.0
"""
tfr = all_tfr[0].copy()
if isinstance(weights, str):
if weights not in ('nave', 'equal'):
raise ValueError('Weights must be a list of float, or "nave" or '
'"equal"')
if weights == 'nave':
weights = np.array([e.nave for e in all_tfr], float)
weights /= weights.sum()
else: # == 'equal'
weights = [1. / len(all_tfr)] * len(all_tfr)
weights = np.array(weights, float)
if weights.ndim != 1 or weights.size != len(all_tfr):
raise ValueError('Weights must be the same size as all_tfr')
ch_names = tfr.ch_names
for t_ in all_tfr[1:]:
assert t_.ch_names == ch_names, ValueError("%s and %s do not contain "
"the same channels"
% (tfr, t_))
assert np.max(np.abs(t_.times - tfr.times)) < 1e-7, \
ValueError("%s and %s do not contain the same time instants"
% (tfr, t_))
# use union of bad channels
bads = list(set(tfr.info['bads']).union(*(t_.info['bads']
for t_ in all_tfr[1:])))
tfr.info['bads'] = bads
# XXX : should be refactored with combined_evoked function
tfr.data = sum(w * t_.data for w, t_ in zip(weights, all_tfr))
tfr.nave = max(int(1. / sum(w ** 2 / e.nave
for w, e in zip(weights, all_tfr))), 1)
return tfr
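# A hedged usage sketch for ``combine_tfr`` (comments only); ``tfr_a`` and
# ``tfr_b`` are assumed AverageTFR instances with identical channels and time
# instants. Negative weights implement the subtraction mentioned above.
#
#     grand_avg = combine_tfr([tfr_a, tfr_b], weights='nave')
#     contrast = combine_tfr([tfr_a, tfr_b], weights=[1, -1])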
# Utils
def _get_data(inst, return_itc):
"""Get data from Epochs or Evoked instance as epochs x ch x time."""
from ..epochs import BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (BaseEpochs, Evoked)):
raise TypeError('inst must be Epochs or Evoked')
if isinstance(inst, BaseEpochs):
data = inst.get_data()
else:
if return_itc:
raise ValueError('return_itc must be False for evoked data')
data = inst.data[np.newaxis].copy()
return data
def _prepare_picks(info, data, picks, axis):
"""Prepare the picks."""
picks = _picks_to_idx(info, picks, exclude='bads')
info = pick_info(info, picks)
sl = [slice(None)] * data.ndim
sl[axis] = picks
data = data[tuple(sl)]
return info, data
def _centered(arr, newsize):
"""Aux Function to center data."""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
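# Hedged numeric illustration of ``_centered`` (comments only):
#
#     arr = np.arange(10)
#     _centered(arr, (6,))     # -> array([2, 3, 4, 5, 6, 7])
#     # startind = (10 - 6) // 2 = 2, endind = 2 + 6 = 8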
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB, sfreq, copy=None):
"""Aux Function to prepare tfr computation."""
if copy is None:
copy = baseline is not None
data = rescale(data, times, baseline, mode, copy=copy)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(times, tmin, tmax, sfreq=sfreq))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
times = times[itmin:itmax]
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(freqs, fmin, fmax, sfreq=sfreq))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
freqs = freqs[ifmin:ifmax]
# crop data
data = data[:, ifmin:ifmax, itmin:itmax]
if dB:
data = 10 * np.log10((data * data.conj()).real)
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
return data, times, freqs, vmin, vmax
def _check_decim(decim):
"""Aux function checking the decim parameter."""
_validate_type(decim, ('int-like', slice), 'decim')
if not isinstance(decim, slice):
decim = slice(None, None, int(decim))
# ensure that we can actually use `decim.step`
if decim.step is None:
decim = slice(decim.start, decim.stop, 1)
return decim
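# Example sketch of the normalization performed by _check_decim:
#
#     _check_decim(4)                   # -> slice(None, None, 4)
#     _check_decim(slice(None, None))   # -> slice(None, None, 1)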
# i/o
def write_tfrs(fname, tfr, overwrite=False):
"""Write a TFR dataset to hdf5.
Parameters
----------
fname : str
The file name, which should end with ``-tfr.h5``.
tfr : AverageTFR instance, or list of AverageTFR instances
The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is None, a name is generated on the fly, based on
        the order in which the TFR objects are passed.
overwrite : bool
If True, overwrite file (if it exists). Defaults to False.
See Also
--------
read_tfrs
Notes
-----
.. versionadded:: 0.9.0
"""
out = []
if not isinstance(tfr, (list, tuple)):
tfr = [tfr]
for ii, tfr_ in enumerate(tfr):
comment = ii if tfr_.comment is None else tfr_.comment
out.append(_prepare_write_tfr(tfr_, condition=comment))
write_hdf5(fname, out, overwrite=overwrite, title='mnepython',
slash='replace')
def _prepare_write_tfr(tfr, condition):
"""Aux function."""
attributes = dict(times=tfr.times, freqs=tfr.freqs, data=tfr.data,
info=tfr.info, comment=tfr.comment, method=tfr.method)
if hasattr(tfr, 'nave'): # if AverageTFR
attributes['nave'] = tfr.nave
elif hasattr(tfr, 'events'): # if EpochsTFR
attributes['events'] = tfr.events
attributes['event_id'] = tfr.event_id
attributes['metadata'] = _prepare_write_metadata(tfr.metadata)
return condition, attributes
def read_tfrs(fname, condition=None):
"""Read TFR datasets from hdf5 file.
Parameters
----------
fname : str
        The file name, which should end with ``-tfr.h5``.
condition : int or str | list of int or str | None
The condition to load. If None, all conditions will be returned.
Defaults to None.
Returns
-------
tfrs : list of instances of AverageTFR | instance of AverageTFR
Depending on ``condition`` either the TFR object or a list of multiple
TFR objects.
See Also
--------
write_tfrs
Notes
-----
.. versionadded:: 0.9.0
"""
check_fname(fname, 'tfr', ('-tfr.h5', '_tfr.h5'))
logger.info('Reading %s ...' % fname)
tfr_data = read_hdf5(fname, title='mnepython', slash='replace')
for k, tfr in tfr_data:
tfr['info'] = Info(tfr['info'])
tfr['info']._check_consistency()
if 'metadata' in tfr:
tfr['metadata'] = _prepare_read_metadata(tfr['metadata'])
is_average = 'nave' in tfr
if condition is not None:
if not is_average:
raise NotImplementedError('condition not supported when reading '
'EpochsTFR.')
tfr_dict = dict(tfr_data)
if condition not in tfr_dict:
keys = ['%s' % k for k in tfr_dict]
raise ValueError('Cannot find condition ("{}") in this file. '
                             'The file contains "{}"'
.format(condition, " or ".join(keys)))
out = AverageTFR(**tfr_dict[condition])
else:
inst = AverageTFR if is_average else EpochsTFR
out = [inst(**d) for d in list(zip(*tfr_data))[1]]
return out
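# Example sketch (hypothetical usage): a write/read round trip; ``power`` is a
# placeholder AverageTFR instance and the file name is arbitrary:
#
#     write_tfrs('sample-tfr.h5', power, overwrite=True)
#     power_read = read_tfrs('sample-tfr.h5')[0]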
def _get_timefreqs(tfr, timefreqs):
"""Find and/or setup timefreqs for `tfr.plot_joint`."""
# Input check
timefreq_error_msg = (
"Supplied `timefreqs` are somehow malformed. Please supply None, "
"a list of tuple pairs, or a dict of such tuple pairs, not: ")
if isinstance(timefreqs, dict):
for k, v in timefreqs.items():
for item in (k, v):
if len(item) != 2 or any((not _is_numeric(n) for n in item)):
raise ValueError(timefreq_error_msg, item)
elif timefreqs is not None:
if not hasattr(timefreqs, "__len__"):
raise ValueError(timefreq_error_msg, timefreqs)
if len(timefreqs) == 2 and all((_is_numeric(v) for v in timefreqs)):
timefreqs = [tuple(timefreqs)] # stick a pair of numbers in a list
else:
for item in timefreqs:
if (hasattr(item, "__len__") and len(item) == 2 and
all((_is_numeric(n) for n in item))):
pass
else:
raise ValueError(timefreq_error_msg, item)
# If None, automatic identification of max peak
else:
from scipy.signal import argrelmax
order = max((1, tfr.data.shape[2] // 30))
peaks_idx = argrelmax(tfr.data, order=order, axis=2)
if peaks_idx[0].size == 0:
_, p_t, p_f = np.unravel_index(tfr.data.argmax(), tfr.data.shape)
timefreqs = [(tfr.times[p_t], tfr.freqs[p_f])]
else:
peaks = [tfr.data[0, f, t] for f, t in
zip(peaks_idx[1], peaks_idx[2])]
peakmax_idx = np.argmax(peaks)
peakmax_time = tfr.times[peaks_idx[2][peakmax_idx]]
peakmax_freq = tfr.freqs[peaks_idx[1][peakmax_idx]]
timefreqs = [(peakmax_time, peakmax_freq)]
timefreqs = {
tuple(k): np.asarray(timefreqs[k]) if isinstance(timefreqs, dict)
else np.array([0, 0]) for k in timefreqs}
return timefreqs
def _preproc_tfr_instance(tfr, picks, tmin, tmax, fmin, fmax, vmin, vmax, dB,
mode, baseline, exclude, copy=True):
"""Baseline and truncate (times and freqs) a TFR instance."""
tfr = tfr.copy() if copy else tfr
exclude = None if picks is None else exclude
picks = _picks_to_idx(tfr.info, picks, exclude='bads')
pick_names = [tfr.info['ch_names'][pick] for pick in picks]
tfr.pick_channels(pick_names)
if exclude == 'bads':
exclude = [ch for ch in tfr.info['bads']
if ch in tfr.info['ch_names']]
if exclude is not None:
tfr.drop_channels(exclude)
data, times, freqs, _, _ = _preproc_tfr(
tfr.data, tfr.times, tfr.freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB, tfr.info['sfreq'], copy=False)
tfr.times = times
tfr.freqs = freqs
tfr.data = data
return tfr
| bsd-3-clause |
ycaihua/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 33 | 6189 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
"""Test NNDSVD behaviour on negative input"""
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
"""Test that NNDSVD does not return negative values"""
data = np.abs(random_state.randn(10, 10))
    for var in (None, 'a', 'ar'):
        W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
"""Test NNDSVD error
Test that _initialize_nmf error is less than the standard deviation of the
entries in the matrix.
"""
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
"""Test NNDSVD variants correctness
Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
the basic version has zeros.
"""
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
"""Test model fit behaviour on negative input"""
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
"""Test that the decomposition does not contain negative values"""
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
"""Test that the fit is not too far away"""
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
"""Test that NLS solver doesn't return negative values"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
"""Test that the NLS results should be close"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
"""Test that NMF.transform returns close values
(transform uses scipy.optimize.nnls for now)
"""
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
"""Smoke test for the case of more components than features."""
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
"""Test sparseness
Test that sparsity constraints actually increase sparseness in the
part where they are applied.
"""
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
"""Test that sparse matrices are accepted as input"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
"""Test that transform works on sparse data. Issue #2124"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
charany1/content | labs/lab2/cs109style.py | 38 | 1293 | from __future__ import print_function
from IPython.core.display import HTML
from matplotlib import rcParams
#colorbrewer2 Dark2 qualitative color table
dark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
(0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
(0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
(0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
(0.4, 0.6509803921568628, 0.11764705882352941),
(0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
(0.6509803921568628, 0.4627450980392157, 0.11372549019607843),
(0.4, 0.4, 0.4)]
def customize_mpl():
"""Tweak matplotlib visual style"""
print("Setting custom matplotlib visual style")
rcParams['figure.figsize'] = (10, 6)
rcParams['figure.dpi'] = 150
rcParams['axes.color_cycle'] = dark2_colors
rcParams['lines.linewidth'] = 2
rcParams['axes.grid'] = True
rcParams['axes.facecolor'] = '#eeeeee'
rcParams['font.size'] = 14
rcParams['patch.edgecolor'] = 'none'
def customize_css():
print("Setting custom CSS for the IPython Notebook")
styles = open('custom.css', 'r').read()
return HTML(styles)
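# Example sketch (hypothetical notebook usage): call these once near the top
# of a notebook; custom.css is assumed to sit next to the notebook.
#
#     customize_mpl()
#     customize_css()  # last expression in a cell, so the HTML is rendered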
| mit |
themrmax/scikit-learn | sklearn/tests/test_metaestimators.py | 52 | 4990 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba',
'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
rcmorehead/simplanets | testandplot.py | 1 | 6271 | import pickle
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy import stats
import numpy as np
import triangle
from matplotlib.backends.backend_pdf import PdfPages
import simple_model
from scipy.optimize import minimize
def opt_bin(A,B):
bounds = [A.min(), B.min(), A.max(), B.max()]
bounds.sort()
sizes = [np.sqrt(A.size), np.sqrt(B.size)]
sizes.sort()
return np.linspace(bounds[0], bounds[3], sizes[1])
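# Example sketch (hypothetical usage): a shared bin grid for two samples, as
# used by the plotting helpers below.
#
#     a, b = np.random.randn(100), np.random.randn(400) + 1.0
#     bins = opt_bin(a, b)
#     plt.hist(a, bins=bins, histtype='step')
#     plt.hist(b, bins=bins, histtype='step')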
def neg(x, function=None):
return -function(x)
def lookatresults(data, name, modes):
plots, thetas = [], []
P = data[-1][0]
for i in xrange(len(P)):
x = P[i]
theta = r'$\theta_{3:}$ {1:.2f} +{2:.2f}/-{0:.2f}'.format(
modes[i]-stats.scoreatpercentile(x, 16),
modes[i],
stats.scoreatpercentile(x, 84)-modes[i], i+1)
thetas.append(r'$\theta_{}$'.format(i+1))
f = plt.figure()
plt.suptitle(name)
plt.subplot(111)
plt.title(theta)
ker = stats.gaussian_kde(x)
plt.hist(x, normed=True, alpha=0.2)
X = np.linspace(0.0, max(x) + .1*max(x), 1000)
plt.plot(X,ker(X))
plt.xlabel(r"$\theta_{}$".format(i+1))
#plt.savefig('theta_{}.png'.format(i))
plots.append(f)
f = plt.figure()
plt.subplot(211)
plt.plot(data['epsilon'], 'o-')
plt.title(r'$\epsilon$')
plt.subplot(212)
plt.plot(data['n total'], 'o-')
plt.title('N Trials')
plots.append(f)
alphas = np.linspace(0, 1, data.size)
for j in xrange(len(data[0][0])):
f = plt.figure()
for i, D in enumerate(data):
F = stats.gaussian_kde(D[0][j])
x = np.linspace(D[0][j].min(), D[0][j].max(), 300)
plt.plot(x, F(x), alpha=alphas[i])
plt.xlabel(r"$\theta_{}$".format(j+1))
if i == data.size - 1:
plt.plot(x, F(x), c='m', ls='--', lw=2, zorder=1)
plots.append(f)
plt.figure()
f = triangle.corner(P.T, labels=thetas)
    #plt.savefig('triangle.png'.format(i))
plots.append(f)
return plots
def plot_modes(obs, modes, stars, model, data):
plots = []
synth = model.generate_data(modes)
synth_stats = model.summary_stats(synth)
obs_stats = model.summary_stats(obs)
f = plt.figure()
plt.suptitle('Obs Cand.:{}; Sim Cand.:{}'.format(obs.size, synth.size))
plt.rc('legend', fontsize='xx-small', frameon=False)
plt.subplot(121)
bins = opt_bin(obs_stats[0],synth_stats[0])
plt.hist(obs_stats[0], bins=bins, histtype='step', label='Data')
plt.hist(synth_stats[0], bins=bins, histtype='step', label='Simulation')
plt.xlabel(r'$\xi$')
plt.legend()
plt.subplot(122)
bins = opt_bin(obs_stats[1],synth_stats[1])
plt.hist(obs_stats[1], bins=np.arange(bins.min()-0.5, bins.max()+1.5,
1),
histtype='step', label='Data', log=True)
plt.hist(synth_stats[1], bins=np.arange(bins.min()-0.5, bins.max()+1.5,
1),
histtype='step', label='Simulation', log=True)
plt.xlabel(r'$N_p$')
plt.legend()
plots.append(f)
f = plt.figure()
plt.suptitle('Normalized Planet Counts')
plt.subplot(121)
plt.hist(obs_stats[1], bins=np.arange(bins.min()-0.5, bins.max()+1.5,
1),
histtype='step', label='Data', normed=True)
plt.hist(synth_stats[1], bins=np.arange(bins.min()-0.5, bins.max()+1.5,
1),
histtype='step', label='Simulation', normed=True)
plt.xlabel(r'$N_p$')
plt.legend()
plt.subplot(122)
plt.hist(obs_stats[1], bins=np.arange(bins.min()-0.5, bins.max()+1.5,
1),
histtype='step', label='Data', log=True, normed=True)
plt.hist(synth_stats[1], bins=np.arange(bins.min()-0.5, bins.max()+1.5,
1),
histtype='step', label='Simulation', log=True, normed=True)
plt.xlabel(r'$N_p$')
plt.legend()
plots.append(f)
D = np.zeros(100)
for i in xrange(100):
S = model.generate_data(modes)
SS = model.summary_stats(S)
D[i] = model.distance_function(obs_stats, SS)
f = plt.figure()
bins = opt_bin(D, np.array(data[-1]['D accepted']))
plt.hist(D, bins=bins, histtype='step', label='Modes')
plt.hist(data[-1]['D accepted'], bins=bins,
histtype='step', label='PMC')
plt.axvline(data[-1]['epsilon'], lw=3, label=r'$\epsilon$')
plt.legend()
plots.append(f)
for i in synth.dtype.names:
f = plt.figure()
bins = np.linspace(synth[i].min(), synth[i].max(),
np.sqrt(synth[i].size))
plt.hist(synth[i], bins=bins, histtype='step', label='Sim')
plt.xlabel(i)
plt.title('min={:.4f}; max={:.4f}'.format(synth[i].min(),
synth[i].max()))
if i in obs.dtype.names:
pass
#plt.hist(obs[i], bins=bins, histtype='step', label='Data')
#plt.legend()
plots.append(f)
return plots
def main():
data = pickle.load(file(sys.argv[1]))
obs = pickle.load(file("/".join(sys.argv[1].split('/')[:-1] +
['obs_data.pkl'])))
stars = pickle.load(file('stars.pkl'))
model = simple_model.MyModel(stars)
f = stats.gaussian_kde(data[-1][0])
int_guess = np.mean(data[-1][0], axis=1)
    modes = minimize(neg, int_guess, args=(f,)).x
results = lookatresults(data, sys.argv[1], modes)
results = results + plot_modes(obs, modes, stars, model, data)
report_plot = PdfPages(sys.argv[1].replace('.pkl', '_testplots.pdf'))
for f in results:
report_plot.savefig(f)
report_plot.close()
if __name__ == "__main__":
main() | mit |
DuCorey/bokeh | bokeh/models/tests/test_sources.py | 1 | 21218 | from __future__ import absolute_import
import unittest
from unittest import skipIf
import warnings
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
from bokeh.models.sources import DataSource, ColumnDataSource
from bokeh.util.serialization import transform_column_source_data
class TestColumnDataSource(unittest.TestCase):
def test_basic(self):
ds = ColumnDataSource()
self.assertTrue(isinstance(ds, DataSource))
def test_init_dict_arg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
def test_init_dict_data_kwarg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data=data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
@skipIf(not is_pandas, "pandas not installed")
def test_init_dataframe_arg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertIsInstance(ds.data[key], np.ndarray)
self.assertEquals(list(df[key]), list(ds.data[key]))
self.assertIsInstance(ds.data['index'], np.ndarray)
self.assertEquals([0, 1], list(ds.data['index']))
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_dataframe_data_kwarg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(data=df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertIsInstance(ds.data[key], np.ndarray)
self.assertEquals(list(df[key]), list(ds.data[key]))
self.assertIsInstance(ds.data['index'], np.ndarray)
self.assertEquals([0, 1], list(ds.data['index']))
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_groupby_arg(self):
from bokeh.sampledata.autompg import autompg as df
group = df.groupby(('origin', 'cyl'))
ds = ColumnDataSource(group)
s = group.describe()
        self.assertTrue(len(ds.column_names) == 41)
self.assertIsInstance(ds.data['origin_cyl'], np.ndarray)
for key in s.columns.values:
k2 = "_".join(key)
self.assertIsInstance(ds.data[k2], np.ndarray)
self.assertEquals(list(s[key]), list(ds.data[k2]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_groupby_data_kwarg(self):
from bokeh.sampledata.autompg import autompg as df
group = df.groupby(('origin', 'cyl'))
ds = ColumnDataSource(data=group)
s = group.describe()
        self.assertTrue(len(ds.column_names) == 41)
self.assertIsInstance(ds.data['origin_cyl'], np.ndarray)
for key in s.columns.values:
k2 = "_".join(key)
self.assertIsInstance(ds.data[k2], np.ndarray)
self.assertEquals(list(s[key]), list(ds.data[k2]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_groupby_with_None_subindex_name(self):
df = pd.DataFrame({"A": [1, 2, 3, 4] * 2, "B": [10, 20, 30, 40] * 2, "C": range(8)})
group = df.groupby(['A', [10, 20, 30, 40] * 2])
ds = ColumnDataSource(data=group)
s = group.describe()
        self.assertTrue(len(ds.column_names) == 41)
self.assertIsInstance(ds.data['index'], np.ndarray)
for key in s.columns.values:
k2 = "_".join(key)
self.assertIsInstance(ds.data[k2], np.ndarray)
self.assertEquals(list(s[key]), list(ds.data[k2]))
def test_add_with_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], name="foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6], name="bar")
self.assertEquals(name, "bar")
def test_add_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3])
self.assertEquals(name, "Series 0")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_add_with_and_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_remove_exists(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
assert name
ds.remove("foo")
self.assertEquals(ds.column_names, [])
def test_remove_exists2(self):
with warnings.catch_warnings(record=True) as w:
ds = ColumnDataSource()
ds.remove("foo")
self.assertEquals(ds.column_names, [])
self.assertEquals(len(w), 1)
self.assertEquals(w[0].category, UserWarning)
self.assertEquals(str(w[0].message), "Unable to find column 'foo' in data source")
def test_stream_bad_data(self):
ds = ColumnDataSource(data=dict(a=[10], b=[20]))
with self.assertRaises(ValueError) as cm:
ds.stream(dict())
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: a, b)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: b)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=[10], x=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (extra: x)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], x=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: b, extra: x)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=[10, 20]))
self.assertEqual(str(cm.exception), "All streaming column updates must be the same length")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=np.ones((1,1))))
self.assertTrue(
str(cm.exception).startswith("stream(...) only supports 1d sequences, got ndarray with size (")
)
def test__stream_good_data(self):
ds = ColumnDataSource(data=dict(a=[10], b=[20]))
ds._document = "doc"
stuff = {}
mock_setter = object()
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._stream = mock
# internal implementation of stream
ds._stream(dict(a=[11, 12], b=[21, 22]), "foo", mock_setter)
self.assertEqual(stuff['args'], ("doc", ds, dict(a=[11, 12], b=[21, 22]), "foo", mock_setter))
self.assertEqual(stuff['kw'], {})
def test_stream_good_data(self):
ds = ColumnDataSource(data=dict(a=[10], b=[20]))
ds._document = "doc"
stuff = {}
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._stream = mock
# public implementation of stream
        ds.stream(dict(a=[11, 12], b=[21, 22]), "foo")
self.assertEqual(stuff['args'], ("doc", ds, dict(a=[11, 12], b=[21, 22]), "foo", None))
self.assertEqual(stuff['kw'], {})
def _assert_equal_dicts_of_arrays(self, d1, d2):
self.assertEqual(d1.keys(), d2.keys())
for k, v in d1.items():
self.assertEqual(type(v), type(d2[k]))
self.assertTrue(np.array_equal(v, d2[k]))
@skipIf(not is_pandas, "pandas not installed")
def test_stream_dict_to_ds_created_from_df(self):
data = pd.DataFrame(dict(a=[10], b=[20], c=[30])).set_index('c')
ds = ColumnDataSource(data)
ds._document = "doc"
notify_owners_stuff = {}
def notify_owners_mock(*args, **kw):
notify_owners_stuff['args'] = args
notify_owners_stuff['kw'] = kw
ds.data._notify_owners = notify_owners_mock
stream_stuff = {}
data_stream = ds.data._stream
def stream_wrapper(*args, **kwargs):
stream_stuff['args'] = args
stream_stuff['kwargs'] = kwargs
data_stream(*args, **kwargs)
ds.data._stream = stream_wrapper
ds._stream(dict(a=[11, 12],
b=np.array([21, 22]),
c=pd.Series([31, 32])), 7)
self.assertEqual(len(stream_stuff['args']), 5)
expected_stream_args = ("doc", ds, dict(a=[11, 12],
b=np.array([21, 22]),
c=pd.Series([31, 32])), 7, None)
for i, (arg, ex_arg) in enumerate(zip(stream_stuff['args'],
expected_stream_args)):
if i == 2:
self.assertEqual(arg['a'], ex_arg['a'])
del arg['a'], ex_arg['a']
self._assert_equal_dicts_of_arrays(arg, ex_arg)
else:
self.assertEqual(arg, ex_arg)
self.assertEqual(stream_stuff['kwargs'], {})
self.assertEqual(len(notify_owners_stuff['args']), 1)
self._assert_equal_dicts_of_arrays(notify_owners_stuff['args'][0],
dict(a=np.array([10]),
b=np.array([20]),
c=np.array([30])))
self._assert_equal_dicts_of_arrays(dict(ds.data),
dict(a=np.array([10, 11, 12]),
b=np.array([20, 21, 22]),
c=np.array([30, 31, 32])))
@skipIf(not is_pandas, "pandas not installed")
def test_stream_series_to_ds_created_from_df(self):
data = pd.DataFrame(dict(a=[10], b=[20], c=[30])).set_index('c')
ds = ColumnDataSource(data)
ds._document = "doc"
notify_owners_stuff = {}
def notify_owners_mock(*args, **kw):
notify_owners_stuff['args'] = args
notify_owners_stuff['kw'] = kw
ds.data._notify_owners = notify_owners_mock
stream_stuff = {}
data_stream = ds.data._stream
def stream_wrapper(*args, **kwargs):
stream_stuff['args'] = args
stream_stuff['kwargs'] = kwargs
data_stream(*args, **kwargs)
ds.data._stream = stream_wrapper
ds._stream(pd.Series([11, 21, 31], index=list('abc')), 7)
self.assertEqual(len(stream_stuff['args']), 5)
expected_stream_args = ("doc", ds, dict(a=np.array([11]),
b=np.array([21]),
c=np.array([31])), 7, None)
for i, (arg, ex_arg) in enumerate(zip(stream_stuff['args'],
expected_stream_args)):
if i == 2:
arg = {k: v.values for k, v in arg.to_dict('series').items()}
self._assert_equal_dicts_of_arrays(arg, ex_arg)
else:
self.assertEqual(arg, ex_arg)
self.assertEqual(stream_stuff['kwargs'], {})
self.assertEqual(len(notify_owners_stuff['args']), 1)
self._assert_equal_dicts_of_arrays(notify_owners_stuff['args'][0],
dict(a=np.array([10]),
b=np.array([20]),
c=np.array([30])))
self._assert_equal_dicts_of_arrays(dict(ds.data),
dict(a=np.array([10, 11]),
b=np.array([20, 21]),
c=np.array([30, 31])))
@skipIf(not is_pandas, "pandas not installed")
def test_stream_df_to_ds_created_from_df(self):
data = pd.DataFrame(dict(a=[10], b=[20], c=[30])).set_index('c')
ds = ColumnDataSource(data)
ds._document = "doc"
notify_owners_stuff = {}
def notify_owners_mock(*args, **kw):
notify_owners_stuff['args'] = args
notify_owners_stuff['kw'] = kw
ds.data._notify_owners = notify_owners_mock
stream_stuff = {}
data_stream = ds.data._stream
def stream_wrapper(*args, **kwargs):
stream_stuff['args'] = args
stream_stuff['kwargs'] = kwargs
data_stream(*args, **kwargs)
ds.data._stream = stream_wrapper
ds._stream(pd.DataFrame(dict(a=[11, 12],
b=[21, 22],
c=[31, 32])), 7)
self.assertEqual(len(stream_stuff['args']), 5)
        expected_stream_data = dict(a=np.array([11, 12]),
                                    b=np.array([21, 22]),
                                    c=np.array([31, 32]))
        expected_args = ("doc", ds, expected_stream_data, 7, None)
for i, (arg, ex_arg) in enumerate(zip(stream_stuff['args'], expected_args)):
if i == 2:
arg = {k: v.values for k, v in arg.to_dict('series').items()}
self.assertEqual(arg.keys(), ex_arg.keys())
for k, v in arg.items():
self.assertTrue(np.array_equal(v, ex_arg[k]))
else:
self.assertEqual(stream_stuff['args'][i], expected_args[i])
self.assertEqual(stream_stuff['kwargs'], {})
self.assertEqual(len(notify_owners_stuff['args']), 1)
self._assert_equal_dicts_of_arrays(notify_owners_stuff['args'][0],
dict(a=np.array([10]),
b=np.array([20]),
c=np.array([30])))
self._assert_equal_dicts_of_arrays(dict(ds.data),
dict(a=np.array([10, 11, 12]),
b=np.array([20, 21, 22]),
c=np.array([30, 31, 32])))
def test_patch_bad_columns(self):
ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
with self.assertRaises(ValueError) as cm:
ds.patch(dict(c=[(0, 100)]))
self.assertEqual(str(cm.exception), "Can only patch existing columns (extra: c)")
with self.assertRaises(ValueError) as cm:
ds.patch(dict(a=[(0,100)], c=[(0, 100)], d=[(0, 100)]))
self.assertEqual(str(cm.exception), "Can only patch existing columns (extra: c, d)")
def test_patch_bad_simple_indices(self):
ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
with self.assertRaises(ValueError) as cm:
ds.patch(dict(a=[(3, 100)]))
self.assertEqual(str(cm.exception), "Out-of bounds index (3) in patch for column: a")
def test_patch_good_simple_indices(self):
ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
ds._document = "doc"
stuff = {}
mock_setter = object()
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._patch = mock
ds.patch(dict(a=[(0,100), (1,101)], b=[(0,200)]), mock_setter)
self.assertEqual(stuff['args'], ("doc", ds, dict(a=[(0,100), (1,101)], b=[(0,200)]), mock_setter))
self.assertEqual(stuff['kw'], {})
def test_patch_bad_slice_indices(self):
ds = ColumnDataSource(data=dict(a=[10, 11, 12, 13, 14, 15], b=[20, 21, 22, 23, 24, 25]))
with self.assertRaises(ValueError) as cm:
ds.patch(dict(a=[(slice(10), list(range(10)))]))
self.assertEqual(str(cm.exception), "Out-of bounds slice index stop (10) in patch for column: a")
with self.assertRaises(ValueError) as cm:
ds.patch(dict(a=[(slice(10, 1), list(range(10)))]))
self.assertEqual(str(cm.exception), "Patch slices must have start < end, got slice(10, 1, None)")
with self.assertRaises(ValueError) as cm:
ds.patch(dict(a=[(slice(None, 10, -1), list(range(10)))]))
self.assertEqual(str(cm.exception), "Patch slices must have positive (start, stop, step) values, got slice(None, 10, -1)")
with self.assertRaises(ValueError) as cm:
ds.patch(dict(a=[(slice(10, 1, 1), list(range(10)))]))
self.assertEqual(str(cm.exception), "Patch slices must have start < end, got slice(10, 1, 1)")
with self.assertRaises(ValueError) as cm:
ds.patch(dict(a=[(slice(10, 1, -1), list(range(10)))]))
self.assertEqual(str(cm.exception), "Patch slices must have start < end, got slice(10, 1, -1)")
with self.assertRaises(ValueError) as cm:
ds.patch(dict(a=[(slice(1, 10, -1), list(range(10)))]))
self.assertEqual(str(cm.exception), "Patch slices must have positive (start, stop, step) values, got slice(1, 10, -1)")
def test_patch_good_slice_indices(self):
ds = ColumnDataSource(data=dict(a=[10, 11, 12, 13, 14, 15], b=[20, 21, 22, 23, 24, 25]))
ds._document = "doc"
stuff = {}
mock_setter = object()
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._patch = mock
ds.patch(dict(a=[(slice(2), [100, 101]), (slice(3, 5), [100, 101])], b=[(slice(None, None, 2), [100, 101, 102])]), mock_setter)
self.assertEqual(stuff['args'],
("doc", ds, dict(a=[(slice(2), [100, 101]), (slice(3, 5), [100, 101])], b=[(slice(None, None, 2), [100, 101, 102])]), mock_setter)
)
self.assertEqual(stuff['kw'], {})
def test_data_column_lengths(self):
# TODO: use this when soft=False
#
#with self.assertRaises(ValueError):
# ColumnDataSource(data=dict(a=[10, 11], b=[20, 21, 22]))
#
#ds = ColumnDataSource()
#with self.assertRaises(ValueError):
# ds.data = dict(a=[10, 11], b=[20, 21, 22])
#
#ds = ColumnDataSource(data=dict(a=[10, 11]))
#with self.assertRaises(ValueError):
# ds.data["b"] = [20, 21, 22]
#
#ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
#with self.assertRaises(ValueError):
# ds.data.update(dict(a=[10, 11, 12]))
with warnings.catch_warnings(record=True) as warns:
ColumnDataSource(data=dict(a=[10, 11], b=[20, 21, 22]))
self.assertEquals(len(warns), 1)
self.assertEquals(str(warns[0].message), "ColumnDataSource's columns must be of the same length. Current lengths: ('a', 2), ('b', 3)")
ds = ColumnDataSource()
with warnings.catch_warnings(record=True) as warns:
ds.data = dict(a=[10, 11], b=[20, 21, 22])
self.assertEquals(len(warns), 1)
self.assertEquals(str(warns[0].message), "ColumnDataSource's columns must be of the same length. Current lengths: ('a', 2), ('b', 3)")
ds = ColumnDataSource(data=dict(a=[10, 11]))
with warnings.catch_warnings(record=True) as warns:
ds.data["b"] = [20, 21, 22]
self.assertEquals(len(warns), 1)
self.assertEquals(str(warns[0].message), "ColumnDataSource's columns must be of the same length. Current lengths: ('a', 2), ('b', 3)")
ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
with warnings.catch_warnings(record=True) as warns:
ds.data.update(dict(a=[10, 11, 12]))
self.assertEquals(len(warns), 1)
self.assertEquals(str(warns[0].message), "ColumnDataSource's columns must be of the same length. Current lengths: ('a', 3), ('b', 2)")
def test_set_data_from_json_list(self):
ds = ColumnDataSource()
data = {"foo": [1, 2, 3]}
ds.set_from_json('data', data)
self.assertEquals(ds.data, data)
def test_set_data_from_json_base64(self):
ds = ColumnDataSource()
data = {"foo": np.arange(3)}
json = transform_column_source_data(data)
ds.set_from_json('data', json)
self.assertTrue(np.array_equal(ds.data["foo"], data["foo"]))
def test_set_data_from_json_nested_base64(self):
ds = ColumnDataSource()
data = {"foo": [[np.arange(3)]]}
json = transform_column_source_data(data)
ds.set_from_json('data', json)
self.assertTrue(np.array_equal(ds.data["foo"], data["foo"]))
def test_set_data_from_json_nested_base64_and_list(self):
ds = ColumnDataSource()
data = {"foo": [np.arange(3), [1, 2, 3]]}
json = transform_column_source_data(data)
ds.set_from_json('data', json)
self.assertTrue(np.array_equal(ds.data["foo"], data["foo"]))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
dingocuster/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 103 | 22297 | """
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: that it does not raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
nrhine1/scikit-learn | sklearn/utils/testing.py | 84 | 24860 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
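# A minimal usage sketch for ``assert_warns`` (the warning-raising function
# below is a made-up example, not part of this module):
#
#   def deprecated_func():
#       warnings.warn("use new_func instead", DeprecationWarning)
#       return 42
#
#   result = assert_warns(DeprecationWarning, deprecated_func)
#   assert result == 42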
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
    # Check the message of all warnings that belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
        Record all warnings and mute them while the context is active.
        This simplified version takes no parameters.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
        The exception type (or tuple of exception types) expected to be raised
    message : str
        A substring that the raised error message must contain
    function : callable
        Callable object expected to raise the error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
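# A minimal usage sketch for ``assert_raise_message`` (the failing callable
# below is illustrative, not part of scikit-learn):
#
#   def fit_empty():
#       raise ValueError("Found array with 0 sample(s)")
#
#   assert_raise_message(ValueError, "0 sample(s)", fit_empty)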
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
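# A rough sketch of how the mldata mocking helpers are typically combined in
# a test (the dataset name and columns are illustrative assumptions):
#
#   mock = {'iris': {'data': np.arange(6).reshape(3, 2),
#                    'label': np.arange(3)}}
#   install_mldata_mock(mock)
#   try:
#       pass  # code under test calling sklearn.datasets.fetch_mldata('iris')
#   finally:
#       uninstall_mldata_mock()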
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion, GridSearchCV and RandomizedSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
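# A minimal usage sketch for ``all_estimators`` (output names are examples):
#
#   classifiers = all_estimators(type_filter='classifier')
#   for name, Klass in classifiers:
#       print(name)  # e.g. 'LogisticRegression'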
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
    Copied from joblib.pool (for independence)."""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
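# A minimal usage sketch for ``TempMemmap`` (the array below is illustrative):
#
#   X = np.random.rand(100, 10)
#   with TempMemmap(X) as X_readonly:
#       # X_readonly is a read-only memmap backed by a temporary folder
#       X_readonly.mean(axis=0)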
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
CMUSV-VisTrails/WorkflowRecommendation | vistrails/packages/componentSearch/networkx_graph.py | 1 | 17801 | '''
Created on Oct 21, 2012
@author: xiaoxiao
'''
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt, QVariant
from PyQt4.QtGui import QStandardItemModel, QTableView, QVBoxLayout, QLabel
from PyQt4.QtGui import QSizePolicy
import random
import networkx as nx
import matplotlib
import matplotlib.pyplot
from db.programmableweb.mongo_source import DataSource
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
'''
Created on Sep 19, 2012
@author: xiaoxiao
'''
def get_api_name(api):
return str(api['id'])[(len("http://www.programmableweb.com/api/")):]
def get_mashup_name(mashup):
return str(mashup['id'])[(len("http://www.programmableweb.com/mashup/")):]
data_source = DataSource()
mashups = data_source.mashups()
class MplCanvas(FigureCanvas):
api_map = data_source.api_with_mashups()
mashup_map = data_source.mashup_with_apis()
def __init__(self):
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
# self.ax.hold(False)
FigureCanvas.__init__(self, self.fig)
FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def draw_related_mashup(self, mashups, current_mashup=None, highlight_mashup=None):
"""
        Draw the related mashup graph
"""
self.ax.clear()
layout = {}
g = nx.Graph()
node_size = {}
node_color = {}
node_map = {}
node_id = 0
for mashup in mashups:
if node_map.get(mashup["id"]) == None:
node_map[mashup["id"]] = node_id
g.add_node(node_id)
node_size[node_id] = 20
if current_mashup and mashup["id"] == current_mashup["id"]:
node_color[node_id] = 0.7
node_size[node_id] = 180
layout[node_id] = (random.random() , random.random())
else:
node_color[node_id] = 0.5
layout[node_id] = (random.random() , random.random())
node_id = node_id + 1
for i in range(0, len(mashups)):
node_id_start = node_map.get(mashups[i]["id"])
node_id_end = node_map.get(current_mashup["id"])
g.add_edge(node_id_start, node_id_end)
try:
nx.draw(g, pos=layout, node_size=[node_size[v] for v in g.nodes()], node_color=[node_color[v] for v in g.nodes()], with_labels=False)
except Exception, e:
print e
self.draw()
matplotlib.pyplot.show()
def draw_related_api(self, apis, current_api=None, highlight_api=None):
"""
        Draw the related api graph
"""
self.ax.clear()
layout = {}
g = nx.Graph()
node_size = {}
node_color = {}
node_map = {}
node_labels = {}
node_id = 0
for api in apis:
if node_map.get(api["id"]) == None:
node_map[api["id"]] = node_id
g.add_node(node_id)
node_size[node_id] = 20
node_labels[node_id] = get_api_name(api)
layout[node_id] = (random.random() , random.random())
if current_api and api["id"] == current_api["id"]:
node_color[node_id] = 0.7
node_size[node_id] = 180
elif highlight_api and api["id"] == highlight_api["id"]:
node_color[node_id] = 0.3
node_size[node_id] = 180
else:
node_color[node_id] = 0.5
node_id = node_id + 1
for i in range(0, len(apis)):
node_id_start = node_map.get(apis[i]["id"])
node_id_end = node_map.get(current_api["id"])
g.add_edge(node_id_start, node_id_end)
try:
with_labels = False
if len(apis) < 20:
with_labels = True
nx.draw(g, pos=layout, labels=node_labels, node_size=[node_size[v] for v in g.nodes()], node_color=[node_color[v] for v in g.nodes()], with_labels=with_labels)
except Exception, e:
print e
self.draw()
matplotlib.pyplot.show()
def draw_api(self, emphasize=False):
self.ax.clear()
layout = {}
g = nx.Graph()
node_size = {}
node_color = {}
node_map = {}
node_id = 0
for key in self.mashup_map:
for api in self.mashup_map[key]:
if node_map.get(api) == None:
node_map[api] = node_id
g.add_node(node_id)
node_size[node_id] = 20
if (emphasize):
node_color[node_id] = 0.7
node_size[node_id] = 180
                        emphasize = False
else:
node_color[node_id] = 0.5
layout[node_id] = (random.random() , random.random())
node_id = node_id + 1
for i in range(0, len(self.mashup_map[key])):
for j in range(0, len(self.mashup_map[key])):
node_id_start = node_map.get(self.mashup_map[key][i])
node_id_end = node_map.get(self.mashup_map[key][j])
g.add_edge(node_id_start, node_id_end)
# node_size[node_id_start] = node_size[node_id_start] + 5
# node_size[node_id_end] = node_size[node_id_end] + 5
try:
nx.draw(g, pos=layout, ax=self.ax, node_size=[node_size[v] for v in g.nodes()], node_color=[node_color[v] for v in g.nodes()], with_labels=False)
except Exception, e:
print e
self.draw()
def draw_mashup(self, emphasize=False):
self.ax.clear()
layout = {}
g = nx.Graph()
node_size = {}
node_color = {}
node_map = {}
node_id = 0
for key in self.api_map:
if len(self.api_map[key]) == 8:
for api in self.api_map[key]:
if node_map.get(api) == None:
node_map[api] = node_id
g.add_node(node_id)
node_size[node_id] = 20
if (emphasize):
node_color[node_id] = 0.7
node_size[node_id] = 180
                            emphasize = False
else:
node_color[node_id] = 0.5
layout[node_id] = (random.random() , random.random())
node_id = node_id + 1
for i in range(0, len(self.api_map[key])):
for j in range(0, len(self.api_map[key])):
node_id_start = node_map.get(self.api_map[key][i])
node_id_end = node_map.get(self.api_map[key][j])
g.add_edge(node_id_start, node_id_end)
# node_size[node_id_start] = node_size[node_id_start] + 5
# node_size[node_id_end] = node_size[node_id_end] + 5
try:
nx.draw(g, pos=layout, ax=self.ax, node_size=[node_size[v] for v in g.nodes()], node_color=[node_color[v] for v in g.nodes()], with_labels=False)
except Exception, e:
print e
self.draw()
def draw_graph(self):
self.ax.clear()
G=nx.Graph()
G.add_edge('a','b',weight=1)
G.add_edge('a','c',weight=1)
G.add_edge('a','d',weight=1)
G.add_edge('a','e',weight=1)
G.add_edge('a','f',weight=1)
G.add_edge('a','g',weight=1)
pos=nx.spring_layout(G)
nx.draw(G,pos,self.ax,node_size=1200,node_shape='o',node_color='0.75')
self.draw()
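# A rough usage sketch for the widgets defined below (assumes a running
# PyQt4 QApplication; the calls are illustrative only):
#
#   app = QtGui.QApplication([])
#   widget = matplotlibWidget()
#   widget.draw_api()   # graph built from MplCanvas.mashup_map
#   widget.show()
#   app.exec_()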
class matplotlibWidget(QtGui.QWidget):
def __init__(self, parent = None):
QtGui.QWidget.__init__(self, parent)
self.canvas = MplCanvas()
self.vbl = QtGui.QVBoxLayout()
self.vbl.addWidget(self.canvas)
self.setLayout(self.vbl)
def draw_graph(self):
self.canvas.draw_graph()
def draw_api(self, emphasize=False):
self.canvas.draw_api(emphasize)
def draw_mashup(self, emphasize=False):
self.canvas.draw_mashup(emphasize)
def draw_apis(self, apis, current_api=None, highlight_api=None):
self.canvas.draw_related_api(apis, current_api, highlight_api)
def draw_mashups(self, mashups, current_mashup=None, highlight_mashup=None):
self.canvas.draw_related_mashup(mashups, current_mashup, highlight_mashup)
class TestForm(QtGui.QMainWindow):
"""
The form to draw the graph.
"""
def __init__(self, parent=None, f=QtCore.Qt.WindowFlags()):
QtGui.QMainWindow.__init__(self, parent, f)
def draw_api_to_api(self):
self.widget = MatplotlibWidget(1)
def draw_mashup_to_mashup(self):
self.widget = MatplotlibWidget(2)
def draw_member_mashup(self):
self.widget = MatplotlibWidget(3)
class MashUpInfoWidget(QtGui.QWidget):
"""
    This widget is used to display the information of a certain mashup.
    Jia asked me to display this when the mouse hovers over a certain node.
    It is hard for me to figure out a version of this with fewer bugs.
"""
def __init__(self, mashup):
QtGui.QWidget.__init__(self)
self.mashup = mashup
self.sample_url_label = QtGui.QLabel('', self)
self.sample_url_label.setFixedWidth(500)
self.sample_url_label.setText("sample url: %s" % (mashup["sampleUrl"]))
self.sample_url_label.move(0, 20)
self.link_label = QtGui.QLabel('', self)
self.link_label.setFixedWidth(500)
self.link_label.move(0, 50)
self.link_label.setText("link: %s" % (mashup["link"]))
self.summary_label = QtGui.QLabel('', self)
self.summary_label.setFixedWidth(500)
self.summary_label.move(0, 80)
self.summary_label.setText("summary: %s" % (mashup["summary"]))
class MatplotlibWidget(QtGui.QWidget):
"""
Implements a Matplotlib figure inside a QWidget.
Use getFigure() and redraw() to interact with matplotlib.
Example::
mw = MatplotlibWidget()
subplot = mw.getFigure().add_subplot(111)
subplot.plot(x,y)
mw.draw()
"""
def mousePressEvent(self, event):
print "clicked"
def __init__(self, num, size=(5.0, 4.0), dpi=100):
QtGui.QWidget.__init__(self)
l = QtGui.QVBoxLayout()
        # Use the number to decide which form to show.
if num == 0:
pass
elif num == 1:
self.sc = ApiToApiCanvas(self, width=5, height=4, dpi=100)
elif num == 2:
self.sc = MashupToMashupCanvas(self, width=5, height=4, dpi=100)
elif num == 3:
self.sc = MemberToMashup(self, width=5, height=4, dpi=100)
l.addWidget(self.sc)
self.infoLabel = QtGui.QLabel('', self)
self.infoLabel.move(20, 20)
self.infoLabel.setFixedWidth(1000)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addWidget(self.sc)
self.setLayout(l)
def getFigure(self):
return self.fig
def draw(self):
self.sc.draw()
def show_label(self, x, y):
mashup = mashups[int(random.uniform(0, len(mashups)))]
self.infoLabel.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
self.infoLabel.setText("mashups: %s" % (mashup["id"]))
self.infoLabel.setStyleSheet("background-color: rgb(199, 199, 199);")
self.infoLabel.move(x, y)
def show_detail(self, x, y):
mashup = mashups[int(random.uniform(0, len(mashups)))]
self.detail = MashUpInfoWidget(mashup)
self.detail.resize(500, 100)
self.detail.move(x, y)
self.detail.show()
class TestFigureCanvas(FigureCanvas):
"""
For testing....
"""
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
# We want the axes cleared every time plot() is called
self.axes.hold(False)
self.draw()
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def set_widget(self, widget):
self.widget = widget
def mousePressEvent(self, event):
if event.button() == 1:
self.widget.show_label(event.x(), event.y())
else:
self.widget.show_detail(event.x(), event.y())
class MashupToMashupCanvas(TestFigureCanvas):
def draw(self):
"""
Canvas for drawing the relationship between mashups
"""
layout = {}
g = nx.Graph()
node_size = {}
node_color = {}
node_map = {}
node_id = 0
api_map = data_source.api_with_mashups()
for key in api_map:
if len(api_map[key]) == 8:
for api in api_map[key]:
if node_map.get(api) == None:
node_map[api] = node_id
g.add_node(node_id)
node_size[node_id] = 50
node_color[node_id] = 0.5
layout[node_id] = (random.random() , random.random())
node_id = node_id + 1
for i in range(0, len(api_map[key])):
for j in range(0, len(api_map[key])):
node_id_start = node_map.get(api_map[key][i])
node_id_end = node_map.get(api_map[key][j])
g.add_edge(node_id_start, node_id_end)
node_size[node_id_start] = node_size[node_id_start] + 5
node_size[node_id_end] = node_size[node_id_end] + 5
try:
nx.draw(g, pos=layout, node_size=[node_size[v] for v in g.nodes()], node_color=[node_color[v] for v in g.nodes()], with_labels=False)
except Exception, e:
print e
class ApiToApiCanvas(TestFigureCanvas):
def draw(self):
"""
        Canvas for drawing the relationship between apis
"""
mashup_map = data_source.mashup_with_apis()
layout = {}
g = nx.Graph()
node_size = {}
node_color = {}
node_map = {}
node_id = 0
for key in mashup_map:
if len(mashup_map[key]) == 20:
for api in mashup_map[key]:
if node_map.get(api) == None:
node_map[api] = node_id
g.add_node(node_id)
node_size[node_id] = 50
node_color[node_id] = 0.5
layout[node_id] = (random.random() , random.random())
node_id = node_id + 1
for i in range(0, len(mashup_map[key])):
for j in range(0, len(mashup_map[key])):
node_id_start = node_map.get(mashup_map[key][i])
node_id_end = node_map.get(mashup_map[key][j])
g.add_edge(node_id_start, node_id_end)
node_size[node_id_start] = node_size[node_id_start] + 5
node_size[node_id_end] = node_size[node_id_end] + 5
try:
nx.draw(g, pos=layout, node_size=[node_size[v] for v in g.nodes()], node_color=[node_color[v] for v in g.nodes()], with_labels=False)
except Exception, e:
print e
class MemberToMashup(TestFigureCanvas):
def draw(self):
layout = {}
g = nx.Graph()
node_size = {}
node_color = {}
mashup_map = {}
node_id = 0
member_mashups = data_source.get_member_mashups_collections()
for member_mashup in member_mashups:
if (len(member_mashup["mashups"]) > 0):
for mashup in member_mashup["mashups"]:
mashup_id = mashup_map.get(mashup["id"])
if not mashup_id:
mashup_id = mashup["id"]
g.add_node(node_id)
mashup_map[mashup_id] = node_id
node_size[node_id] = 50
node_color[node_id] = 'r'
layout[node_id] = (random.random() , random.random())
node_id = node_id + 1
for i in range(0, len(member_mashup["mashups"])):
for j in range(0, len(member_mashup["mashups"])):
node_id_start = mashup_map.get(member_mashup["mashups"][i]["id"])
node_id_end = mashup_map.get(member_mashup["mashups"][j]["id"])
g.add_edge(node_id_start, node_id_end)
node_size[node_id_start] = node_size[node_id_start] + 5
node_size[node_id_end] = node_size[node_id_end] + 5
try:
nx.draw(g, pos=layout, node_size=[node_size[v] for v in g.nodes()], node_color=[node_color[v] for v in g.nodes()], with_labels=False, width=3)
except Exception, e:
print e
| bsd-3-clause |
Microsoft/hummingbird | tests/test_sklearn_discretizer_converters.py | 1 | 2128 | """
Tests sklearn discretizer converters: Binarizer, KBinsDiscretizer
"""
import unittest
import warnings
import numpy as np
import torch
from sklearn.preprocessing import Binarizer, KBinsDiscretizer
from sklearn.datasets import load_breast_cancer
import hummingbird.ml
class TestSklearnDiscretizers(unittest.TestCase):
# Test Binarizer on dummy data
def test_binarizer_converter(self):
data = np.array([[1, 2, -3], [4, -3, 0], [0, 1, 4], [0, -5, 6]], dtype=np.float32)
data_tensor = torch.from_numpy(data)
for threshold in [0.0, 1.0, -2.0]:
model = Binarizer(threshold=threshold)
model.fit(data)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertIsNotNone(torch_model)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data_tensor), rtol=1e-06, atol=1e-06,
)
def _test_k_bin_discretizer_base(self, data):
data_tensor = torch.from_numpy(data)
for n_bins in [2, 3, 5, 10, 20]:
for encode in ["ordinal", "onehot", "onehot-dense"]:
model = KBinsDiscretizer(n_bins=n_bins, encode=encode)
model.fit(data)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertIsNotNone(torch_model)
if encode == "onehot":
sk_output = model.transform(data).todense()
else:
sk_output = model.transform(data)
np.testing.assert_allclose(sk_output, torch_model.transform(data_tensor), rtol=1e-06, atol=1e-06)
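    # A minimal sketch of the conversion workflow exercised above
    # (illustrative only; it mirrors the calls already used in this test):
    #
    #   model = KBinsDiscretizer(n_bins=3, encode="ordinal").fit(data)
    #   torch_model = hummingbird.ml.convert(model, "torch")
    #   torch_model.transform(torch.from_numpy(data))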
    # Test KBinsDiscretizer on dummy data
def test_k_bins_discretizer_converter_dummy_data(self):
data = np.array([[1, 2, -3], [4, -3, 0], [0, 1, 4], [0, -5, 6]], dtype=np.float32)[:, :2]
self._test_k_bin_discretizer_base(data)
    # Test KBinsDiscretizer on breast cancer data
def test_k_bins_discretizer_converter_breast_cancer_data(self):
self._test_k_bin_discretizer_base(load_breast_cancer().data)
if __name__ == "__main__":
unittest.main()
| mit |
baklanovp/pystella | spec.py | 1 | 28450 | #!/usr/bin/env python3
# #!/usr/bin/python3
import getopt
import os
import sys
from os.path import dirname
import numpy as np
from scipy import interpolate
from scipy.optimize import fmin
import matplotlib.pyplot as plt
# from matplotlib import cm
from matplotlib import gridspec
from matplotlib.collections import PolyCollection
from matplotlib.colors import LinearSegmentedColormap
#from matplotlib.mlab import griddata
from scipy.interpolate import griddata
from mpl_toolkits.mplot3d import Axes3D
import pystella as ps
import logging
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
__author__ = 'bakl'
ROOT_DIRECTORY = dirname(os.path.abspath(__file__))
colors_band = {'U': "blue", 'B': "cyan", 'V': "black", 'R': "red", 'I': "magenta",
'J': "green", 'H': "cyan", 'K': "black",
'UVM2': "skyblue", 'UVW1': "orange", 'UVW2': "blue",
'g': "g", 'r': "red", 'i': "magenta",
'u': "blue", 'z': "chocolate", 'y': 'olive', 'w': 'tomato'}
_colors = ["blue", "cyan", "brown", 'darkseagreen', 'tomato', 'olive', 'orange',
'skyblue', 'darkviolet']
# colors = {"B-V": "blue", 'B-V-I': "cyan", 'V-I': "brown"}
lntypes = {"B-V": "-", 'B-V-I': "-.", 'V-I': "--"}
markers = {u'D': u'diamond', 6: u'caretup', u's': u'square', u'x': u'x',
5: u'caretright', u'^': u'triangle_up', u'd': u'thin_diamond', u'h': u'hexagon1',
u'+': u'plus', u'*': u'star', u'o': u'circle', u'p': u'pentagon', u'3': u'tri_left',
u'H': u'hexagon2', u'v': u'triangle_down', u'8': u'octagon', u'<': u'triangle_left'}
markers = list(markers.keys())
def plot_spec(dic_stars, times, set_bands, is_planck=False, is_filter=False):
xlim = [1000., 20000]
# xlim = [3000., 6000]
# setup figure
plt.matplotlib.rcParams.update({'font.size': 14})
fig = plt.figure(num=len(set_bands), figsize=(8, len(set_bands) * 4), dpi=100, facecolor='w', edgecolor='k')
gs1 = gridspec.GridSpec(len(set_bands), 1)
gs1.update(wspace=0.3, hspace=0.3, left=0.1, right=0.9)
ax_cache = {}
ax2_cache = {}
ib = 0
for bset in set_bands:
ib += 1
irow = ib - 1
if bset in ax_cache:
ax = ax_cache[bset]
else:
ax = fig.add_subplot(gs1[irow, 0])
ax_cache[bset] = ax
for it in range(len(times)):
star = dic_stars[it]
x = star.Wl * ps.phys.cm_to_angs
y = star.FluxAB
# y = star.FluxWlObs
bcolor = _colors[it % (len(_colors) - 1)]
ax.plot(x, y, marker=markers[it % (len(markers) - 1)],
label=r'$%4.0f^d: T=%7.1e\,K, \zeta=%0.2f$' % (times[it], star.get_Tcol(bset), star.get_zeta(bset)),
markersize=5, color=bcolor, ls="", linewidth=1.5)
if is_planck:
star_bb = planck_fit(star, bset)
xx = star_bb.Wl * ps.phys.cm_to_angs
yy = star_bb.FluxAB
# yy = star_bb.FluxWlObs
if yy is not None:
ax.plot(xx, yy, color=bcolor, ls=":", linewidth=2.5)
# ax.plot(xx, yy, color=bcolor, ls="--", linewidth=2.5, label='Planck')
if is_filter:
if bset in ax2_cache:
ax2 = ax2_cache[bset]
else:
ax2 = fig.add_subplot(gs1[irow, 0], sharex=ax, frameon=False)
# ax2.invert_yaxis()
ax2_cache[bset] = ax2
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel(r'Filter transmission')
# ax2.set_ylabel("Filter Response")
bands = bset.split('-')
for n in bands:
b = ps.band.band_by_name(n)
xx = b.wl * ps.phys.cm_to_angs
yy = b.resp_wl
# ax2.plot(xx, yy, color=colors_band[n], ls="--", linewidth=1.5, label=n)
ax2.fill_between(xx, 0, yy, color=colors_band[n], alpha=0.2)
ax2.set_xscale('log')
ax2.set_xlim(xlim)
ax2.legend(prop={'size': 9}, loc=2, borderaxespad=0., fontsize='large')
ib = 0
for bset in set_bands:
ib += 1
ax = ax_cache[bset]
ax.set_xlim(xlim)
# ax.set_ylim(ylim)
ax.invert_yaxis()
ax.set_ylabel(r'Absolute AB Magnitude')
# ax.set_ylabel(r'$F_\lambda, \, [erg\, s^{-1} cm^2]$')
ax.set_xscale('log')
# ax.set_yscale('log')
if ib == len(set_bands):
ax.set_xlabel(r'$\lambda, \, [\AA]$')
ax.set_title(bset)
# if is_filter:
# ax2 = ax2_cache[bset]
# ax2.legend(prop={'size': 9}, loc=4)
# else:
ax.legend(prop={'size': 11}, loc=4, borderaxespad=0.)
# plt.title('; '.join(set_bands) + ' filter response')
# plt.grid()
return fig
def color_map_temp():
cdict = {'blue': ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.5, 0.8, 1.0),
(0.75, 1.0, 1.0),
(1.0, 0.4, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.5, 0.9, 0.9),
(0.75, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'red': ((0.0, 0.0, 0.4),
(0.25, 1.0, 1.0),
(0.5, 1.0, 0.8),
(0.75, 0.0, 0.0),
(1.0, 0.0, 0.0))
}
cdict = {'blue': ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.5, 0.3, 0.3),
(0.75, 0.75, 1.0),
(1.0, 0.9, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25, 0., 0.0),
(0.5, 0.9, 0.9),
(0.75, 0., 0.0),
(1.0, 0.0, 0.0)),
'red': ((0.0, 0.0, 0.4),
(0.15, 0.9, 0.9),
(0.25, 0.8, 0.8),
(0.5, 0.5, 0.5),
(0.75, 0.0, 0.0),
(1.0, 0.0, 0.0))
}
# cdict = {'red': ((0.0, 1.0, 1.0),
# (0.1, 1.0, 1.0), # red
# (0.4, 1.0, 1.0), # violet
# (1.0, 0.0, 0.0)), # blue
#
# 'green': ((0.0, 0.0, 0.0),
# (1.0, 0.0, 0.0)),
#
# 'blue': ((0.0, 0.0, 0.0),
# (0.1, 0.0, 0.0), # red
# (0.5, 1.0, 1.0), # violet
# (1.0, 1.0, 0.0)) # blue
# }
return LinearSegmentedColormap('UWR', cdict, 256)
#
# cm.register_cmap(name='UWR', cmap=cmap1)
# UWR = cm.get_cmap('UWR')
# return UWR
def plot_spec_poly(series, moments=None, fcut=1.e-20, is_info=False):
# moments = moments or (1., 3., 5, 10., 15., 25., 35., 50., 70., 100., 120., 150., 200.)
# moments = moments or np.arange(0., 200., 3)
moments = moments or np.exp(np.linspace(np.log(0.5), np.log(400.), 40))
# init graph
fig = plt.figure()
ax = fig.gca(projection='3d')
pos = 0
t_data = []
for i, t in enumerate(series.Time):
if t > moments[pos]:
t_data.append(t)
pos += 1
verts = []
T_cols = []
x_lim = [float("inf"), float("-inf")]
z_lim = [float("inf"), float("-inf")]
for i, t in enumerate(t_data):
spec = series.get_spec_by_time(t_data[i])
spec.cut_flux(fcut * max(spec.Flux)) # cut flux
ys = spec.Flux
# ys = np.log10(ys)
wl = spec.Wl * ps.phys.cm_to_angs
verts.append(list(zip(wl, ys)))
x_lim[0] = min(x_lim[0], np.min(wl))
x_lim[1] = max(x_lim[1], np.max(wl))
z_lim[0] = min(z_lim[0], np.min(ys))
z_lim[1] = max(z_lim[1], np.max(ys))
T_cols.append(spec.T_color)
if is_info:
print("time: %f T_color=%f" % (t, spec.T_color))
# print "time: %f T_wien=%f wl_max=%f" % (t_data[i], spec.temp_wien, spec.wl_flux_max)
Tmap = np.log(T_cols / np.min(T_cols))
color_map = color_map_temp()
m = plt.cm.ScalarMappable(cmap=color_map)
m.set_array(Tmap)
facecolors = m.to_rgba(Tmap * 1.e-4)
poly = PolyCollection(verts, facecolors=facecolors, linewidths=1.5) # np.ones(len(t_data)))
poly.set_alpha(0.5)
ax.add_collection3d(poly, zs=t_data, zdir='y')
# Create a color bar with 11 ticks
cbar = plt.colorbar(m, shrink=0.85)
ticks = np.linspace(min(Tmap), max(Tmap), 10)
ticks_lbl = np.round(np.exp(ticks) * np.min(T_cols), -2)
cbar.ax.set_yticklabels(ticks_lbl)
cbar.ax.yaxis.set_label_text("Color temperature [K]")
ax.set_xlim3d(x_lim)
ax.set_ylim3d(min(t_data), max(t_data))
ax.set_zlim3d(z_lim)
# ax.set_xscale('log')
ax.set_zscale('log')
ax.xaxis.set_label_text('Wavelength [A]')
ax.yaxis.set_label_text('Time [days]')
ax.zaxis.set_label_text('Flux [a.u.]')
return fig
def plot_fit_bands(model, series, set_bands, times):
name = model.Name
tt = model.get_tt().load()
tt = tt[tt['time'] > min(times) - 1.] # time cut days
Rph_spline = interpolate.splrep(tt['time'], tt['Rph'], s=0)
distance = ps.phys.pc2cm(10.) # pc for Absolute magnitude
dic_results = {} # dict((k, None) for k in names)
# it = 0
for it, time in enumerate(times):
print("\nRun: %s t=%f [%d/%d]" % (name, time, it, len(times)))
spec = series.get_spec_by_time(time)
spec.cut_flux(max(spec.Flux) * .1e-6) # cut flux
star = ps.Star("%s: %f" % (name, time), spec=spec, is_flux_eq_luminosity=True)
star.set_distance(distance)
radius = interpolate.splev(time, Rph_spline)
star.set_radius_ph(radius)
star.set_redshift(0.)
for bset in set_bands:
Tcol, zeta = compute_tcolor(star, bset.split('-'))
# save results
star.set_Tcol(Tcol, bset)
star.set_zeta(zeta, bset)
print("\nRun: %s Tcol=%f zeta=%f " % (bset, Tcol, zeta))
dic_results[it] = star
# it += 1
fig = plot_spec(dic_results, times, set_bands, is_planck=True, is_filter=True)
return fig
def save_fit_wl(fsave, mname, time, Tdil, Wdil, wl_ab=None):
# print
# print("{:>10s} {:>12s} {:>12s} {:>12s} ".format("Time", "Twien", "Tcol", "zeta", "Tdil", "Wdil"))
if fsave.endswith('.hdf5'):
import h5py
with h5py.File(fsave, "w") as f:
# data = np.array([time, Tdil, Wdil])
data = np.zeros(len(time), dtype={'names': ['time', 'Tcolor', 'W'], 'formats': [np.float] * 3})
data['time'] = time
data['Tcolor'] = Tdil
data['W'] = Wdil
ds = f.create_dataset('bbfit', data=data)
ds.attrs['model'] = mname
if wl_ab is not None:
ds.attrs['wl_ab'] = '-'.join(map(str, wl_ab))
else:
with open(fsave, "w+") as f:
print("{:>8s}".format("Time") +
' '.join("{:>12s}".format(s) for s in ("T_dil", "W_dil")), file=f)
# ' '.join("{:>12s}".format(s) for s in ("T_wien", "Tcol", "W", "T_dil", "W_dil")), file=f)
# print("{:>10s} {:>12s} {:>12s} {:>12s} ".format("Time", "Twien", "Tcol","zeta"), file=f)
for t, *p in zip(time, Tdil, Wdil):
print("{:8.2f}".format(t) + ' '.join("{0:12.2f}".format(s) for s in p), file=f)
print("The temperature has been saved to %s " % fsave)
return None
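# A minimal sketch for reading back the data written by ``save_fit_wl`` when
# the hdf5 branch is used (the file name is illustrative):
#
#   import h5py
#   with h5py.File('temp_model.hdf5', 'r') as f:
#       bbfit = f['bbfit'][()]
#       print(bbfit['time'], bbfit['Tcolor'], bbfit['W'])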
def plot_fit_wl(model, series, wl_ab, times=None, fsave=None):
tt = model.get_tt().load()
series_cut = series.copy(wl_ab=wl_ab)
time = series.Time
radius = np.interp(time, tt['time'], tt['rbb'])
if True:
Tcol, Twien, zeta = [], [], []
Tdil, Wdil = [], []
for i, t in enumerate(time):
sp = series_cut.get_spec(i)
# sp = series_cut.get_spec_by_time(t)
R = radius[i]
star_cut = ps.Star("bb_cut", sp)
star_cut.set_distance(R)
sp_obs = ps.Spectrum('cut', star_cut.Freq, star_cut.FluxObs)
Tc = sp_obs.T_color
Tw = sp_obs.T_wien
w = np.power(Tw / Tc, 4)
Td, W_d = sp_obs.T_color_zeta()
Tcol.append(Tc)
Twien.append(Tw)
zeta.append(w)
Tdil.append(Td)
Wdil.append(W_d)
else:
Tcol = series_cut.get_T_color()
Twien = series_cut.get_T_wien()
zeta = np.power(Twien / Tcol, 4)
res = series_cut.get_T_color_zeta()
Tdil, Wdil = res[:, 0], res[:, 1]
print("{:>8}".format("Time") +
' '.join("{:>12s}".format(s) for s in ("T_dil", "W_dil")))
for t, *p in zip(time, Tdil, Wdil):
print("{:8.2f}".format(t) + ' '.join("{0:12.2f}".format(s) for s in p))
# print("%10.2f %12.6f %12.6f %12.6f " % p)
# save
if fsave is not None:
save_fit_wl(fsave, model.Name, time, Tdil, Wdil, wl_ab=wl_ab)
return
# plot
fig, ax = plt.subplots()
marker_style = dict(linestyle=':', color='cornflowerblue', markersize=10)
ax.semilogy(time, Tcol, 'ks-', markerfacecolor='white', markersize=3, label="Tcolor")
ax.semilogy(time, Twien, 'r:', label="Twien")
ax.semilogy(time, Tdil, 'ys-', markersize=3, label="T_dil")
ax.semilogy(tt['time'], tt['Tbb'], 'g:', label="tt Tbb")
# ax.semilogy(tt['time'], tt['Teff'], 'y:', label="tt Teff")
# ax.semilogy(results['time'], tt['T'], 'm:', label="tt Teff")
ax.legend()
# wl range
if times is not None:
for xc in times:
ax.axvline(x=xc, color="grey", linestyle='--')
fig = plot_spec_wl(times, series, tt, wl_ab)
else:
pass
return fig
def plot_spec_wl(times, series, tt, wl_ab, **kwargs):
font_size = kwargs.get('font_size', 12)
nrow = np.math.ceil(len(times)/2.)
ncol = 2
fig = plt.figure(figsize=(12, nrow * 4))
plt.matplotlib.rcParams.update({'font.size': font_size})
series_cut = series.copy(wl_ab=wl_ab)
# radiuses = np.interp(times, tt['time'], tt['Rph'], 0, 0)
radiuses = np.interp(times, tt['time'], tt['rbb'], 0, 0)
Tbbes = np.interp(times, tt['time'], tt['Tbb'], 0, 0)
marker_style = dict(linestyle=':', markersize=5)
for i, t in enumerate(times):
ax = fig.add_subplot(nrow, ncol, i + 1)
spec = series.get_spec_by_time(t)
spec.cut_flux(max(spec.Flux) * 1e-6) # cut flux
R = radiuses[i]
star_bb = ps.Star("bb", spec)
star_bb.set_distance(R)
spec_cut = series_cut.get_spec_by_time(t)
star_cut = ps.Star("bb_cut", spec_cut)
star_cut.set_distance(R)
# spectrum
ax.semilogy(star_bb.Wl * ps.phys.cm_to_angs, star_bb.FluxWlObs, label="Spec Ph")
# Tcolor
spec_obs = ps.Spectrum('wbb', star_cut.Freq, star_cut.FluxObs)
Tcol = spec_obs.T_color
T_wien = spec_obs.T_wien
zeta = (T_wien / Tcol) ** 4
wbb = ps.SpectrumDilutePlanck(spec.Freq, Tcol, zeta)
ax.semilogy(wbb.Wl * ps.phys.cm_to_angs, wbb.FluxWl, label="Tcol={:.0f} W={:.2f}".format(Tcol, zeta),
marker='<', **marker_style)
# diluted
Tdil, W = spec_obs.T_color_zeta()
dil = ps.SpectrumDilutePlanck(spec.Freq, Tdil, W)
ax.semilogy(dil.Wl * ps.phys.cm_to_angs, dil.FluxWl, label="Tdil={:.0f} W={:.2f}".format(Tdil, W),
marker='>', **marker_style)
# Tbb
Tbb = Tbbes[i]
bb = ps.SpectrumPlanck(spec.Freq, Tbb)
ax.semilogy(bb.Wl * ps.phys.cm_to_angs, bb.FluxWl, label="Tbb={:.0f}".format(Tbb),
marker='d', **marker_style)
# wl range
if wl_ab is not None:
for xc in wl_ab:
plt.axvline(x=xc, color="grey", linestyle='--')
ax.legend(loc="best", prop={'size': 9})
ax.text(0.01, 0.05, "$t_d: {:.1f}$".format(t), horizontalalignment='left', transform=ax.transAxes,
bbox=dict(facecolor='green', alpha=0.3))
xlim = ax.get_xlim()
ax.set_xlim(xlim[0], min(xlim[1], 2e4))
# fig.subplots_adjust(wspace=0, hspace=0)
# fig.subplots_adjust(wspace=0, hspace=0, left=0.07, right=0.96, top=0.97, bottom=0.06)
plt.subplots_adjust(left=0.07, right=0.96, top=0.97, bottom=0.06)
return fig
def plot_spec_t(series, wl_lim=None, moments=None):
wl_lim = wl_lim or (1e1, 5e4)
moments = moments or (1., 3., 5, 10., 15., 25., 35., 50., 70., 100., 120., 150., 200.)
# moments = np.arange(0.5, 200., 3)
pos = 0
t_data = []
spec_array = []
for i, t in enumerate(series.Time):
if t > moments[pos]:
t_data.append(t)
spec_array.append(series.get_spec(i).Flux)
pos += 1
y_data = series.Wl * ps.phys.cm_to_angs
spec_array = np.array(spec_array)
x_data, y_data = np.meshgrid(t_data, y_data)
x = x_data.flatten()
y = y_data.flatten()
z = spec_array.flatten()
# filters
is_z = z > np.max(z) * 1.e-20
is_y = (y > wl_lim[0]) & (y < wl_lim[1])
is_good = is_y & is_z
x = x[is_good]
y = y[is_good]
z = z[is_good]
fig = plt.figure()
# scatter3D
if True:
ax = Axes3D(fig)
ax.scatter3D(x, y, z, c=z, cmap=plt.cm.viridis) # jet)
plt.show()
return None
# plot_surface
is_plot_surface = False
if is_plot_surface:
xi = np.linspace(min(x), max(x))
yi = np.linspace(min(y), max(y))
X, Y = np.meshgrid(xi, yi)
# interpolation
        Z = griddata((x, y), z, (X, Y), method='linear')
ax = Axes3D(fig)
ax.scatter3D(x, y, z, c=z, cmap=plt.cm.viridis)
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3)
plt.show()
return None
#
# surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
#
# ax = fig.add_subplot(111, projection='3d')
#
# X, Y = np.meshgrid(x_data, y_data)
# z_data = griddata(x, y, data_array, x_data, y_data)
#
# if is_spline:
# spline = sp.interpolate.Rbf(x, y, z, function='thin-plate')
# xi = np.linspace(min(x), max(x))
# yi = np.linspace(min(y), max(y))
# X, Y = np.meshgrid(xi, yi)
# # interpolation
# Z = spline(X, Y)
# x_data, y_data = np.meshgrid(np.arange(data_array.shape[1]),
# np.arange(data_array.shape[0]))
#
# Flatten out the arrays so that they may be passed to "ax.bar3d".
# Basically, ax.bar3d expects three one-dimensional arrays:
# x_data, y_data, z_data. The following call boils down to picking
# one entry from each array and plotting a bar to from
# (x_data[i], y_data[i], 0) to (x_data[i], y_data[i], z_data[i]).
#
# x_data = x_data.flatten()
# y_data = y_data.flatten()
# z_data = data_array.flatten()
# ax.bar3d(x_data,
# y_data,
# np.zeros(len(z_data)),
# 1, 1, z_data)
# surf = ax.plot_surface(x_data, y_data, z_data, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
# plt.show()
def planck_fit(star, bset):
sp = ps.SpectrumDilutePlanck(star.Freq, star.get_Tcol(bset), star.get_zeta(bset) ** 2)
star_bb = ps.Star("bb", sp)
star_bb.set_radius_ph(star.radius_ph)
star_bb.set_distance(star.distance)
return star_bb
def epsilon_mag(x, freq, mag, bands, radius, dist):
temp_color, zeta = x
sp = ps.SpectrumDilutePlanck(freq, temp_color, zeta ** 2)
star = ps.Star("bb", sp)
star.set_radius_ph(radius)
star.set_distance(dist)
mag_bb = {b: star.magAB(ps.band.band_by_name(b)) for b in bands}
e = 0
for b in bands:
e += abs(mag[b] - mag_bb[b])
return e
def compute_tcolor(star, bands):
mags = {}
for n in bands:
b = ps.band.band_by_name(n)
mags[n] = star.magAB(b)
Tcol, zeta = fmin(epsilon_mag, x0=np.array([1.e4, 1]),
args=(star.Freq, mags, bands, star.radius_ph, star.distance), disp=False)
return Tcol, zeta
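# A rough usage sketch for ``compute_tcolor`` (the star setup mirrors
# ``plot_fit_bands`` above; the radius value is an illustrative assumption):
#
#   star = ps.Star('model: 10d', spec=spec, is_flux_eq_luminosity=True)
#   star.set_distance(ps.phys.pc2cm(10.))
#   star.set_radius_ph(1e14)
#   star.set_redshift(0.)
#   Tcol, zeta = compute_tcolor(star, 'B-V'.split('-'))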
def interval2float(v):
a = str(v).split(':')
if len(a) == 2 and len(a[0]) == 0:
return 0., float(a[1])
elif len(a) == 2 and len(a[1]) == 0:
return float(a[0]), float("inf")
elif len(a) == 2:
return list(map(np.float, str(v).split(':')))
elif len(a) == 1:
        return float(a[0])
else:
return None
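# Examples of how ``interval2float`` parses the -t/-x command-line values:
#
#   interval2float('5.:75.')   # -> [5.0, 75.0]
#   interval2float(':75.')     # -> (0.0, 75.0)
#   interval2float('5.:')      # -> (5.0, inf)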
def usage():
bands = ps.band.get_names()
print("Usage: show F(t,nu) from ph-file")
print(" spec.py [params]")
print(" -b <set_bands>: delimiter '_'. Default: B-V.\n"
" Available: " + '-'.join(sorted(bands)))
print(" -f force mode: rewrite tcolor-files even if it exists")
print(" -i <model name>. Example: cat_R450_M15_Ni007_E7")
print(" -k k-correction: z:Srest:Sobs. Example: 0.5:V:R")
print(" -o options: [fit, wl] - plot spectral fit")
print(" -p <model path(directory)>, default: ./")
print(" -s <file-name> without extension. Save plot to pdf-file. Default: spec_<file-name>.pdf")
print(" -t time interval [day]. Example: 5.:75.")
print(" -x wave length interval [A]. Example: 1.:25e3")
print(" -w write data to file. Example: flux to magAB, Tcolor and W [.hdf5]")
print(" -h print usage")
def write_magAB(series, d=10):
"""
Write SED in AB mag
:param series:
:param d: distance [pc]
:return:
"""
print("times: {0} freqs: {1} [{1}]".format(len(series.Time), len(series.get_spec(0).Freq), series.Name))
print("{0}".format(' '.join(map(str, series.get_spec(0).Freq))))
for i, t in enumerate(series.Time):
spec = series.get_spec(i)
# freq = s.Freq
# flux = spec.Flux
# mAB = rf.Flux2MagAB(flux)
star = ps.Star("%s: %f" % (series.Name, t), spec=spec, is_flux_eq_luminosity=True)
star.set_distance(d * ps.phys.pc)
# print("{0} {1} ".format(t, ' '.join(map(str, star.Flux))))
print("{0} {1} ".format(t, ' '.join(map(str, star.FluxAB))))
# print("{0} {1} ".format(t, ' '.join(map(str, flux))))
# print("{0} {1} ".format(t, ' '.join(map(str, mAB))))
def plot_kcorr(times, kcorr, figsize=(12, 12)):
fig, ax = plt.subplots(figsize=figsize)
ax.plot(times, kcorr, marker='o', ls='')
ax.set_xlabel('Time')
ax.set_ylabel('K-correction')
return fig
def kcorr_save(fname, times, kcorr):
with open(fname, "w") as f:
print("{:>8s} {:>8s}".format("Time", "kcorr"), file=f)
for t, k in zip(times, kcorr):
print("{:8.2f} {:12.2f}".format(t, k), file=f)
print("The k-corrections has been saved to %s " % fname)
def main():
is_save_plot = False
is_kcorr = False
is_fit = False
is_fit_wl = False
is_write = False
fsave = None
fplot = None
z_sn = 0.
bn_rest = None
bn_obs = None
try:
opts, args = getopt.getopt(sys.argv[1:], "b:fhsup:i:k:o:t:w:x:")
except getopt.GetoptError as err:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
name = ''
path = os.getcwd()
ps.Band.load_settings()
# name = 'cat_R500_M15_Ni006_E12'
if not name:
if len(opts) == 0:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-i':
name = str(arg)
break
# set_bands = ['B-V']
# set_bands = ['B-V', 'B-V-I']
# set_bands = ['U-B', 'U-B-V', 'B-V']
t_ab = None
wl_ab = None
set_bands = ['B-V', 'B-V-I', 'V-I']
# set_bands = ['B-V', 'B-V-I', 'V-I', 'J-H-K']
times = [5., 15., 30., 60., 90., 120.]
for opt, arg in opts:
if opt == '-b':
set_bands = str(arg).split('_')
for bset in set_bands:
for b in bset.split('-'):
if not ps.band.is_exist(b):
print('No such band: ' + b)
sys.exit(2)
continue
if opt == '-w':
is_write = True
fsave = arg
continue
if opt == '-s':
is_save_plot = True
if len(arg) > 0:
fplot = str(arg).strip()
continue
if opt == '-x':
wl_ab = interval2float(arg)
# wl_ab = [np.float(s) for s in (str(arg).split(':'))]
continue
if opt == '-t':
t_ab = list(map(float, arg.split(':'))) # interval2float(arg)
if len(t_ab) > 1:
times = t_ab
continue
if opt == '-o':
ops = str(arg).split(':')
is_fit = "fit" in ops
is_fit_wl = "wl" in ops
continue
if opt == '-k':
ops = str(arg).split(':')
if len(ops) == 3:
z_sn = float(ops[0])
bn_rest = ops[1].strip()
bn_obs = ops[2].strip()
is_kcorr = True
else:
raise ValueError('Args: {} should be string as "z:Srest:Sobs"'.format(arg))
continue
if opt == '-p':
path = os.path.expanduser(str(arg))
if not (os.path.isdir(path) and os.path.exists(path)):
print("No such directory: " + path)
sys.exit(2)
continue
elif opt == '-h':
usage()
sys.exit(2)
if not name:
print("No model. Use key -i.")
sys.exit(2)
model = ps.Stella(name, path=path)
series = model.get_ph(t_diff=1.05)
if not model.is_ph:
print("No ph-data for: " + str(model))
return None
if is_fit:
if is_write:
if fsave is None or len(fsave) == 0:
fsave = "spec_%s" % name
print("Save series to %s " % fsave)
series_cut = series.copy(t_ab=t_ab, wl_ab=wl_ab)
write_magAB(series_cut)
sys.exit(2)
if not model.is_tt:
print("Error in fit-band: no tt-data for: " + str(model))
sys.exit(2)
series = model.get_ph(t_diff=1.05)
series_cut = series.copy(t_ab=t_ab, wl_ab=wl_ab)
fig = plot_fit_bands(model, series_cut, set_bands, times)
elif is_kcorr:
times, kcorr = [], []
for t, k in ps.rf.rad_func.kcorrection(series, z_sn, bn_rest, bn_obs):
times.append(t)
kcorr.append(k)
if is_write:
if fsave is None or len(fsave) == 0 or fsave == '1':
fsave = os.path.join(os.path.expanduser('~/'), "kcorr_%s" % name) + '.txt'
kcorr_save(fsave, times, kcorr)
sys.exit(3)
else:
fig = plot_kcorr(times, kcorr)
elif is_fit_wl:
if not model.is_tt:
print("Error in fit-wave: no tt-data for: " + str(model))
sys.exit(2)
if is_write:
if fsave is None or len(fsave) == 0 or fsave == '1':
fsave = os.path.join(os.path.expanduser('~/'), "temp_%s" % name) + '.txt'
series = series.copy(t_ab=t_ab)
plot_fit_wl(model, series, wl_ab, times, fsave=fsave) # just save data
sys.exit(3)
fig = plot_fit_wl(model, series, wl_ab, times)
else:
series = model.get_ph(t_diff=1.05)
series_cut = series.copy(t_ab=t_ab, wl_ab=wl_ab)
fig = plot_spec_poly(series_cut)
print("Plot spectral F(t,nu): " + str(model))
if fig is not None:
if is_save_plot:
if fplot is None or len(fplot) == 0:
fplot = "spec_%s" % name
d = os.path.expanduser('~/')
fplot = os.path.join(d, os.path.splitext(fplot)[0]) + '.pdf'
print("Save plot to %s " % fplot)
fig.savefig(fplot, bbox_inches='tight')
else:
# plt.grid()
plt.show()
if __name__ == '__main__':
main()
| mit |
xbanke/ebokeh | setup.py | 1 | 1626 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version: 0.1
@author: quantpy
@email: [email protected]
@file: setup.py
@time: 2017-08-08 22:20
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from ebokeh import __version__
__title__ = 'ebokeh'
__description__ = 'Enhanced Ebokeh, make charting much easier!'
__url__ = 'https://github.com/xbanke/ebokeh'
__author__ = 'quantpy'
__author_email__ = '[email protected]'
__license__ = 'MIT'
__requires__ = ['bokeh', 'pandas']
__package__ = ['ebokeh', 'ebokeh/plotting']
__keywords__ = ['bokeh', 'charts']
setup(
name=__title__,
version=__version__,
description=__description__,
url=__url__,
author=__author__,
author_email=__author_email__,
license=__license__,
packages=__package__,
    keywords=__keywords__,
install_requires=__requires__,
zip_safe=False,
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries'
]
)
| mit |
jakirkham/bokeh | bokeh/core/property/datetime.py | 3 | 5438 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide date and time related properties
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import datetime
import dateutil.parser
# External imports
from six import string_types
# Bokeh imports
from ...util.dependencies import import_optional
from .bases import Property
from .primitive import bokeh_integer_types
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Date',
'Datetime',
'TimeDelta',
)
pd = import_optional('pandas')
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Date(Property):
''' Accept Date (but not DateTime) values.
'''
def __init__(self, default=None, help=None):
super(Date, self).__init__(default=default, help=help)
def transform(self, value):
value = super(Date, self).transform(value)
if isinstance(value, (float,) + bokeh_integer_types):
try:
value = datetime.date.fromtimestamp(value)
except (ValueError, OSError):
value = datetime.date.fromtimestamp(value/1000)
elif isinstance(value, string_types):
value = dateutil.parser.parse(value).date()
return value
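    # Illustration (added for clarity, not part of the original module):
    # transform() parses date-like strings via dateutil and treats ints/floats
    # as POSIX timestamps, retrying in milliseconds if seconds overflow, e.g.
    #
    #     Date().transform("2010-03-04")   # -> datetime.date(2010, 3, 4)
    #     Date().transform(1262304000)     # -> date for that timestamp
    #                                      #    (local-timezone dependent)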
def validate(self, value, detail=True):
super(Date, self).validate(value, detail)
if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + bokeh_integer_types)):
msg = "" if not detail else "expected a date, string or timestamp, got %r" % value
raise ValueError(msg)
class Datetime(Property):
''' Accept Datetime values.
'''
def __init__(self, default=datetime.date.today(), help=None):
super(Datetime, self).__init__(default=default, help=help)
def transform(self, value):
value = super(Datetime, self).transform(value)
return value
# Handled by serialization in protocol.py for now
def validate(self, value, detail=True):
super(Datetime, self).validate(value, detail)
datetime_types = (datetime.datetime, datetime.date)
try:
import numpy as np
datetime_types += (np.datetime64,)
except (ImportError, AttributeError) as e:
if e.args == ("'module' object has no attribute 'datetime64'",):
import sys
if 'PyPy' in sys.version:
pass
else:
raise e
else:
pass
if (isinstance(value, datetime_types)):
return
if pd and isinstance(value, (pd.Timestamp)):
return
msg = "" if not detail else "Expected a datetime instance, got %r" % value
raise ValueError(msg)
class TimeDelta(Property):
''' Accept TimeDelta values.
'''
def __init__(self, default=datetime.timedelta(), help=None):
super(TimeDelta, self).__init__(default=default, help=help)
def transform(self, value):
value = super(TimeDelta, self).transform(value)
return value
# Handled by serialization in protocol.py for now
def validate(self, value, detail=True):
super(TimeDelta, self).validate(value, detail)
timedelta_types = (datetime.timedelta,)
try:
import numpy as np
timedelta_types += (np.timedelta64,)
except (ImportError, AttributeError) as e:
if e.args == ("'module' object has no attribute 'timedelta64'",):
import sys
if 'PyPy' in sys.version:
pass
else:
raise e
else:
pass
if (isinstance(value, timedelta_types)):
return
if pd and isinstance(value, (pd.Timedelta)):
return
msg = "" if not detail else "Expected a timedelta instance, got %r" % value
raise ValueError(msg)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
youprofit/scikit-image | skimage/io/manage_plugins.py | 17 | 10353 | """Handle image reading, writing and plotting plugins.
To improve performance, plugins are only loaded as needed. As a result, there
can be multiple states for a given plugin:
available: Defined in an *ini file located in `skimage.io._plugins`.
See also `skimage.io.available_plugins`.
partial definition: Specified in an *ini file, but not defined in the
corresponding plugin module. This will raise an error when loaded.
available but not on this system: Defined in `skimage.io._plugins`, but
a dependent library (e.g. Qt, PIL) is not available on your system.
This will raise an error when loaded.
loaded: The real availability is determined when it's explicitly loaded,
either because it's one of the default plugins, or because it's
loaded explicitly by the user.
"""
import sys
if sys.version.startswith('3'):
from configparser import ConfigParser # Python 3
else:
from ConfigParser import ConfigParser # Python 2
import os.path
from glob import glob
from .collection import imread_collection_wrapper
__all__ = ['use_plugin', 'call_plugin', 'plugin_info', 'plugin_order',
'reset_plugins', 'find_available_plugins', 'available_plugins']
# The plugin store will save a list of *loaded* io functions for each io type
# (e.g. 'imread', 'imsave', etc.). Plugins are loaded as requested.
plugin_store = None
# Dictionary mapping plugin names to a list of functions they provide.
plugin_provides = {}
# The module names for the plugins in `skimage.io._plugins`.
plugin_module_name = {}
# Meta-data about plugins provided by *.ini files.
plugin_meta_data = {}
# For each plugin type, default to the first available plugin as defined by
# the following preferences.
preferred_plugins = {
# Default plugins for all types (overridden by specific types below).
'all': ['pil', 'matplotlib', 'qt', 'freeimage'],
'imshow': ['matplotlib']
}
def _clear_plugins():
"""Clear the plugin state to the default, i.e., where no plugins are loaded
"""
global plugin_store
plugin_store = {'imread': [],
'imsave': [],
'imshow': [],
'imread_collection': [],
'_app_show': []}
_clear_plugins()
def _load_preferred_plugins():
# Load preferred plugin for each io function.
io_types = ['imsave', 'imshow', 'imread_collection', 'imread']
for p_type in io_types:
_set_plugin(p_type, preferred_plugins['all'])
plugin_types = (p for p in preferred_plugins.keys() if p != 'all')
for p_type in plugin_types:
_set_plugin(p_type, preferred_plugins[p_type])
def _set_plugin(plugin_type, plugin_list):
for plugin in plugin_list:
if plugin not in available_plugins:
continue
try:
use_plugin(plugin, kind=plugin_type)
break
except (ImportError, RuntimeError, OSError):
pass
def reset_plugins():
_clear_plugins()
_load_preferred_plugins()
def _parse_config_file(filename):
"""Return plugin name and meta-data dict from plugin config file."""
parser = ConfigParser()
parser.read(filename)
name = parser.sections()[0]
meta_data = {}
for opt in parser.options(name):
meta_data[opt] = parser.get(name, opt)
return name, meta_data
def _scan_plugins():
"""Scan the plugins directory for .ini files and parse them
to gather plugin meta-data.
"""
pd = os.path.dirname(__file__)
config_files = glob(os.path.join(pd, '_plugins', '*.ini'))
for filename in config_files:
name, meta_data = _parse_config_file(filename)
plugin_meta_data[name] = meta_data
provides = [s.strip() for s in meta_data['provides'].split(',')]
valid_provides = [p for p in provides if p in plugin_store]
for p in provides:
if not p in plugin_store:
print("Plugin `%s` wants to provide non-existent `%s`."
" Ignoring." % (name, p))
# Add plugins that provide 'imread' as provider of 'imread_collection'.
need_to_add_collection = ('imread_collection' not in valid_provides and
'imread' in valid_provides)
if need_to_add_collection:
valid_provides.append('imread_collection')
plugin_provides[name] = valid_provides
plugin_module_name[name] = os.path.basename(filename)[:-4]
_scan_plugins()
def find_available_plugins(loaded=False):
"""List available plugins.
Parameters
----------
loaded : bool
If True, show only those plugins currently loaded. By default,
all plugins are shown.
Returns
-------
p : dict
Dictionary with plugin names as keys and exposed functions as
values.
"""
active_plugins = set()
for plugin_func in plugin_store.values():
for plugin, func in plugin_func:
active_plugins.add(plugin)
d = {}
for plugin in plugin_provides:
if not loaded or plugin in active_plugins:
d[plugin] = [f for f in plugin_provides[plugin]
if not f.startswith('_')]
return d
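# For reference, the returned mapping looks roughly like
# {'pil': ['imread', 'imsave', 'imread_collection'], 'matplotlib': [...]}
# (illustrative values; the exact contents depend on the *.ini files found).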
available_plugins = find_available_plugins()
def call_plugin(kind, *args, **kwargs):
"""Find the appropriate plugin of 'kind' and execute it.
Parameters
----------
kind : {'imshow', 'imsave', 'imread', 'imread_collection'}
Function to look up.
plugin : str, optional
Plugin to load. Defaults to None, in which case the first
matching plugin is used.
*args, **kwargs : arguments and keyword arguments
Passed to the plugin function.
"""
if not kind in plugin_store:
raise ValueError('Invalid function (%s) requested.' % kind)
plugin_funcs = plugin_store[kind]
if len(plugin_funcs) == 0:
msg = ("No suitable plugin registered for %s.\n\n"
"You may load I/O plugins with the `skimage.io.use_plugin` "
"command. A list of all available plugins can be found using "
"`skimage.io.plugins()`.")
raise RuntimeError(msg % kind)
plugin = kwargs.pop('plugin', None)
if plugin is None:
_, func = plugin_funcs[0]
else:
_load(plugin)
try:
func = [f for (p, f) in plugin_funcs if p == plugin][0]
except IndexError:
raise RuntimeError('Could not find the plugin "%s" for %s.' %
(plugin, kind))
return func(*args, **kwargs)
def use_plugin(name, kind=None):
"""Set the default plugin for a specified operation. The plugin
will be loaded if it hasn't been already.
Parameters
----------
name : str
Name of plugin.
kind : {'imsave', 'imread', 'imshow', 'imread_collection'}, optional
Set the plugin for this function. By default,
the plugin is set for all functions.
See Also
--------
available_plugins : List of available plugins
Examples
--------
To use Matplotlib as the default image reader, you would write:
>>> from skimage import io
>>> io.use_plugin('matplotlib', 'imread')
To see a list of available plugins run ``io.available_plugins``. Note that
this lists plugins that are defined, but the full list may not be usable
if your system does not have the required libraries installed.
"""
if kind is None:
kind = plugin_store.keys()
else:
if not kind in plugin_provides[name]:
raise RuntimeError("Plugin %s does not support `%s`." %
(name, kind))
if kind == 'imshow':
kind = [kind, '_app_show']
else:
kind = [kind]
_load(name)
for k in kind:
if not k in plugin_store:
raise RuntimeError("'%s' is not a known plugin function." % k)
funcs = plugin_store[k]
# Shuffle the plugins so that the requested plugin stands first
# in line
funcs = [(n, f) for (n, f) in funcs if n == name] + \
[(n, f) for (n, f) in funcs if n != name]
plugin_store[k] = funcs
def _inject_imread_collection_if_needed(module):
"""Add `imread_collection` to module if not already present."""
if not hasattr(module, 'imread_collection') and hasattr(module, 'imread'):
imread = getattr(module, 'imread')
func = imread_collection_wrapper(imread)
setattr(module, 'imread_collection', func)
def _load(plugin):
"""Load the given plugin.
Parameters
----------
plugin : str
Name of plugin to load.
See Also
--------
plugins : List of available plugins
"""
if plugin in find_available_plugins(loaded=True):
return
if not plugin in plugin_module_name:
raise ValueError("Plugin %s not found." % plugin)
else:
modname = plugin_module_name[plugin]
plugin_module = __import__('skimage.io._plugins.' + modname,
fromlist=[modname])
provides = plugin_provides[plugin]
for p in provides:
if p == 'imread_collection':
_inject_imread_collection_if_needed(plugin_module)
elif not hasattr(plugin_module, p):
print("Plugin %s does not provide %s as advertised. Ignoring." %
(plugin, p))
continue
store = plugin_store[p]
func = getattr(plugin_module, p)
if not (plugin, func) in store:
store.append((plugin, func))
def plugin_info(plugin):
"""Return plugin meta-data.
Parameters
----------
plugin : str
Name of plugin.
Returns
-------
m : dict
Meta data as specified in plugin ``.ini``.
"""
try:
return plugin_meta_data[plugin]
except KeyError:
raise ValueError('No information on plugin "%s"' % plugin)
def plugin_order():
"""Return the currently preferred plugin order.
Returns
-------
p : dict
Dictionary of preferred plugin order, with function name as key and
plugins (in order of preference) as value.
"""
p = {}
for func in plugin_store:
p[func] = [plugin_name for (plugin_name, f) in plugin_store[func]]
return p
| bsd-3-clause |
pprett/statsmodels | statsmodels/tools/datautils.py | 1 | 2502 | import os
import time
import numpy as np
from numpy import genfromtxt, array
class Dataset(dict):
def __init__(self, **kw):
dict.__init__(self,kw)
self.__dict__ = self
# Some datasets have string variables. If you want a raw_data attribute you
# must create this in the dataset's load function.
try: # some datasets have string variables
self.raw_data = self.data.view((float, len(self.names)))
except:
pass
def __repr__(self):
return str(self.__class__)
def process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None):
names = list(data.dtype.names)
if isinstance(endog_idx, int):
endog = array(data[names[endog_idx]], dtype=dtype)
endog_name = names[endog_idx]
endog_idx = [endog_idx]
else:
endog_name = [names[i] for i in endog_idx]
if stack:
endog = np.column_stack(data[field] for field in endog_name)
else:
endog = data[endog_name]
if exog_idx is None:
exog_name = [names[i] for i in xrange(len(names))
if i not in endog_idx]
else:
exog_name = [names[i] for i in exog_idx]
if stack:
exog = np.column_stack(data[field] for field in exog_name)
else:
exog = data[exog_name]
if dtype:
endog = endog.astype(dtype)
exog = exog.astype(dtype)
dataset = Dataset(data=data, names=names, endog=endog, exog=exog,
endog_name=endog_name, exog_name=exog_name)
return dataset
def process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=None):
from pandas import DataFrame
data = DataFrame(data, dtype=dtype)
names = data.columns
if isinstance(endog_idx, int):
endog_name = names[endog_idx]
endog = data[endog_name]
if exog_idx is None:
exog = data.drop([endog_name], axis=1)
else:
exog = data.filter(names[exog_idx])
else:
endog = data.ix[:, endog_idx]
endog_name = list(endog.columns)
if exog_idx is None:
exog = data.drop(endog_name, axis=1)
elif isinstance(exog_idx, int):
exog = data.filter([names[exog_idx]])
else:
exog = data.filter(names[exog_idx])
exog_name = list(exog.columns)
dataset = Dataset(data=data, names=list(names), endog=endog, exog=exog,
endog_name=endog_name, exog_name=exog_name)
return dataset
| bsd-3-clause |
scienceopen/themis-asi-reader | src/themisasi/pixels.py | 2 | 1805 | from argparse import ArgumentParser
import numpy as np
from .io import load
from .fov import getimgind, projected_coord
from .plots import plotazel, plottimeseries
from matplotlib.pyplot import show
"""
Plot time series for pixel(s) chosen by az/el
PlotThemisPixels ~/data/themis/ fykn 2011-01-06T17:00:00 -az 65 70 -el 48 68
PlotThemisPixels tests/ gako 2011-01-06T17:00:00 -az 65 70 -el 48 68
"""
def cli():
p = ArgumentParser(description=" reads THEMIS GBO ASI CDF files and plays high speed video")
p.add_argument("path", help="ASI data path")
p.add_argument("site", help="site 4 character code e.g. gako")
p.add_argument("treq", help="time (or time start,stop) requested", nargs="+")
g = p.add_mutually_exclusive_group()
p.add_argument("-az", help="azimuth(s) to plot (degrees)", type=float, nargs="+")
g.add_argument("-el", help="elevation(s) to plot (degrees)", type=float, nargs="+")
g.add_argument(
"-lla", help="latitude, longitude, altitude [km] projection", type=float, nargs=3
)
p.add_argument("-o", "--odir", help="write video to this directory")
p.add_argument("-v", "--verbose", action="store_true")
P = p.parse_args()
imgs = load(P.path, site=P.site, treq=P.treq)
if P.verbose:
plotazel(imgs)
# %% select nearest neighbor
ind = getimgind(imgs, P.lla, P.az, P.el)
az, el, plat, plon, palt_m = projected_coord(imgs, ind, P.lla)
print(f"Using az, el {az}, {el}")
print(f"Using projected lat,lon, alt [km] {plat} {plon} {palt_m}")
dat = np.empty((imgs.time.size, ind.shape[0]), dtype=imgs["imgs"].dtype)
for i, j in enumerate(ind):
dat[:, i] = imgs["imgs"][:, j[0], j[1]]
# %% plot
ttxt = f"{imgs.filename}"
plottimeseries(dat, imgs.time, ttxt)
show()
| gpl-3.0 |
miykael/BrainsForPublication | NotYetCurated/PlottingSurfaceValuesWithPysurfer/pysurfer_plot_VISAN_surface_values.py | 3 | 15401 | #!/usr/bin/env python
#=============================================================================
# Created by Kirstie Whitaker
# September 2014
# Contact: [email protected]
#=============================================================================
#=============================================================================
# IMPORTS
#=============================================================================
import os
import sys
import argparse
import numpy as np
from surfer import Brain, io
import itertools as it
import matplotlib.pylab as plt
import matplotlib.image as mpimg
import matplotlib.gridspec as gridspec
#=============================================================================
# FUNCTIONS
#=============================================================================
def setup_argparser():
'''
Code to read in arguments from the command line
    Also allows you to change some settings
'''
# Build a basic parser.
help_text = ('Plot the NORA VISAN data on a freesurfer surface')
sign_off = 'Author: Kirstie Whitaker <[email protected]>'
parser = argparse.ArgumentParser(description=help_text, epilog=sign_off)
# Now add the arguments
parser.add_argument(dest='output_dir',
type=str,
metavar='output_dir',
help='output directory')
parser.add_argument('-cope1', '--cope1_file',
type=str,
metavar='cope1_file',
help='cope1 nifti file in MNI space',
default=None)
parser.add_argument('-cope2', '--cope2_file',
type=str,
metavar='cope2_file',
help='cope2 nifti file in MNI space',
default=None)
parser.add_argument('-cope4', '--cope4_file',
type=str,
metavar='cope4_file',
help='cope4 nifti file in MNI space',
default=None)
parser.add_argument('--subject_id',
type=str,
metavar='subject id',
help='freesurfer subject id',
default='fsaverage')
parser.add_argument('-sd', '--subjects_dir',
type=str,
metavar='subjects_dir',
help='freesurfer subjects dir',
default=os.environ["SUBJECTS_DIR"])
parser.add_argument('-c', '--cmap',
type=str,
metavar='cmap',
help='colormap',
default='autumn')
parser.add_argument('-cf', '--color_file',
type=str,
metavar='color_file',
help='file containing list of custom colors',
default=None)
parser.add_argument('--center',
action='store_true',
help='center the color bar around 0')
parser.add_argument('-t', '--thresh',
type=float,
metavar='thresh',
help='mask values below this value',
default=-98)
parser.add_argument('-m', '--mask',
type=float,
metavar='mask',
help='mask values that are exactly this value',
default=0)
parser.add_argument('-l', '--lower',
type=float,
metavar='lowerthr',
help='lower limit for colorbar',
default=None)
parser.add_argument('-u', '--upper',
type=float,
metavar='upperthr',
help='upper limit for colorbar',
default=None)
parser.add_argument('-s', '--surface',
type=str,
metavar='surface',
help='surface - one of "pial", "inflated" or "both"',
default='both')
arguments = parser.parse_args()
return arguments, parser
#------------------------------------------------------------------------------
def read_in_data(cope1_file, cope2_file, cope4_file, subject_id, subjects_dir):
'''
Read in the three nifti files for each hemisphere
and combine into one surface (for each hemisphere)
'''
vtx_data_dict = {}
for hemi in [ 'lh', 'rh' ]:
cope1 = io.project_volume_data(cope1_file,
hemi,
subject_id=subject_id)
cope2 = io.project_volume_data(cope2_file,
hemi,
subject_id=subject_id)
cope4 = io.project_volume_data(cope4_file,
hemi,
subject_id=subject_id)
# Binarize the maps and threshold to get rid of vertices that are only
# created from the smoothing kernel
cope1_bin = np.copy(cope1)
cope1_bin[cope1>0] = 1
cope2_bin = np.copy(cope2)
cope2_bin[cope2>0] = 2
cope4_bin = np.copy(cope4)
cope4_bin[cope4>0] = 4
cope124_bin = cope1_bin + cope2_bin + cope4_bin
vtx_data_dict[hemi] = cope124_bin
# Mask the data so you are only visualising cortex
cortex_fname = os.path.join(subjects_dir, subject_id, 'label', hemi + '.cortex.label')
# Read the data in and mask it so that non-cortex is -99
vtx_data_dict[hemi] = mask_vtx_data(vtx_data_dict[hemi], cortex_fname, thresh)
return vtx_data_dict
#------------------------------------------------------------------------------
def mask_vtx_data(vtx_data, cortex_fname, thresh):
cortex_data = io.nib.freesurfer.read_label(cortex_fname)
# Create a mask of 1s where there is cortex and 0s on the medial wall
mask = np.zeros_like(vtx_data)
mask[cortex_data] = 1
# Set all values that are not in cortex to thresh-1
vtx_data[mask == 0] = thresh-1
return vtx_data
#------------------------------------------------------------------------------
def calc_range(vtx_data_left, vtx_data_right, thresh, l, u):
'''
This is an important step to ensure that the colorbar is exactly
the same for the right and left hemispheres.
'''
if l == None:
# Figure out the min and max for each hemisphere
l_l = vtx_data_left[vtx_data_left>=thresh].min()
l_r = vtx_data_right[vtx_data_right>=thresh].min()
# Take the smallest of these two
l = np.min([l_l, l_r])
# And round to a nice number
l = np.floor(l*20)/20.0
if u == None:
# Figure out the min and max for each hemisphere
u_l = vtx_data_left[vtx_data_left>=thresh].max()
u_r = vtx_data_right[vtx_data_right>=thresh].max()
# Take the largest of these two
u = np.max([u_l, u_r])
# And round to a nice number
u = np.ceil(u*20)/20.0
# Return the lower and upper bounds
return l, u
#------------------------------------------------------------------------------
def plot_surface(vtx_data, subject_id, subjects_dir, hemi, surface, output_dir, prefix, l, u, cmap, center, thresh):
# Open up a brain in pysurfer
brain = Brain(subject_id, hemi, surface,
subjects_dir = subjects_dir,
config_opts=dict(background="white",
height=665,
width=800))
if center:
# Make sure the colorbar is centered
if l**2 < u **2:
l = u*-1
else:
u = l*-1
# Create an empty brain if the values are all below threshold
if np.max(vtx_data) < thresh:
# Add your data to the brain
brain.add_data(vtx_data*0,
l,
u,
thresh = thresh,
colormap=cmap,
alpha=0.0)
# Otherwise, add the data appropriately!
else:
# Add your data to the brain
brain.add_data(vtx_data,
l,
u,
thresh = thresh,
colormap=cmap,
alpha=.8)
# Save the images for medial and lateral
# putting a color bar on all of them
brain.save_imageset(prefix = os.path.join(output_dir, prefix),
views = views_list,
colorbar = range(len(views_list)) )
#-----------------------------------------------------------------------------
def combine_pngs(surface, output_dir):
'''
Find four images and combine them into one nice picture
'''
figsize = (5,4)
fig = plt.figure(figsize = figsize, facecolor='white')
grid = gridspec.GridSpec(2, 2)
grid.update(left=0, right=1, top=1, bottom = 0.08, wspace=0, hspace=0)
f_list = [ os.path.join(output_dir, '_'.join(['lh', surface, 'lateral.png'])),
os.path.join(output_dir, '_'.join(['rh', surface, 'lateral.png'])),
os.path.join(output_dir, '_'.join(['lh', surface, 'medial.png'])),
os.path.join(output_dir, '_'.join(['rh', surface, 'medial.png'])) ]
# Plot each figure in turn
for g_loc, f in zip(grid, f_list):
ax = plt.Subplot(fig, g_loc)
fig.add_subplot(ax)
img = mpimg.imread(f)
# Crop the figures appropriately
# NOTE: this can change depending on which system you've made the
# images on originally - it's a bug that needs to be sorted out!
if 'lateral' in f:
img_cropped = img[75:589,55:(-50),:]
else:
img_cropped = img[45:600,25:(-25),:]
ax.imshow(img_cropped, interpolation='none')
ax.set_axis_off()
# Add the bottom of one of the images as the color bar
# at the bottom of the combo figure
grid_cbar = gridspec.GridSpec(1,1)
grid_cbar.update(left=0, right=1, top=0.08, bottom=0, wspace=0, hspace=0)
ax = plt.Subplot(fig, grid_cbar[0])
fig.add_subplot(ax)
img = mpimg.imread(f)
img_cbar = img[600:,:]
ax.imshow(img_cbar, interpolation='none')
ax.set_axis_off()
# Save the figure
filename = os.path.join(output_dir, '{}_combined.png'.format(surface))
print filename
fig.savefig(filename, bbox_inches=0, dpi=300)
#=============================================================================
# SET SOME VARIABLES
#=============================================================================
# Read in the arguments from argparse
arguments, parser = setup_argparser()
cope1_file = arguments.cope1_file
cope2_file = arguments.cope2_file
cope4_file = arguments.cope4_file
output_dir = arguments.output_dir
subject_id = arguments.subject_id
subjects_dir = arguments.subjects_dir
l = arguments.lower
u = arguments.upper
cmap = arguments.cmap
color_file = arguments.color_file
center = arguments.center
surface = arguments.surface
thresh = arguments.thresh
mask = arguments.mask
if surface == 'both':
surface_list = [ "inflated", "pial" ]
elif surface == 'inflated':
surface_list = [ "inflated" ]
elif surface == 'pial':
surface_list = [ "pial" ]
else:
print "Do not recognise surface. Check {}".format(surface)
parser.print_help()
sys.exit()
hemi_list = [ "lh", "rh" ]
views_list = [ 'medial', 'lateral' ]
# Check how many of the three cope inputs exist:
cope_dict = {}
if cope1_file is not None:
cope_dict[1] = cope1_file
if cope2_file is not None:
cope_dict[2] = cope2_file
if cope4_file is not None:
cope_dict[4] = cope4_file
if len(cope_dict.keys()) == 0:
print "No cope files provided! Exiting."
sys.exit()
# Now check that the files exist
for cope, cope_file in cope_dict.items():
if not os.path.isfile(cope_file):
print "{} file doesn't exist, check {}".format(cope, cope_file)
sys.exit()
# Make the output directory if it doesn't already exist
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# Set the subjects dir
os.environ['SUBJECTS_DIR'] = subjects_dir
#=============================================================================
# READ IN THE VERTEX DATA
#=============================================================================
vtx_data_dict = read_in_data(cope1_file,
cope2_file,
cope4_file,
subject_id,
subjects_dir)
#=============================================================================
# CALCULATE THE COLOR BAR RANGE
#=============================================================================
# Calculate the lower and upper values if they haven't been defined:
l, u = calc_range(vtx_data_dict['lh'], vtx_data_dict['rh'], thresh, l, u)
# Unless there's a given color file
if color_file:
cmap = [line.strip() for line in open(color_file)]
l = 1
u = len(cmap)
# If you've passed rgb values you need to convert
# these to tuples
if len(cmap[0].split()) == 3:
cmap = [ (np.float(x.split()[0]),
np.float(x.split()[1]),
np.float(x.split()[2])) for x in cmap ]
#=============================================================================
# MAKE THE INDIVIDUAL PICTURES
#=============================================================================
for hemi, surface in it.product(hemi_list, surface_list):
prefix = '_'.join([hemi, surface])
    # Show this data on a brain
    # (cmap already holds the custom color list if a color file was given,
    #  so a single call covers both cases)
    plot_surface(vtx_data_dict[hemi], subject_id, subjects_dir,
                    hemi, surface,
                    output_dir, prefix,
                    l, u, cmap, center,
                    thresh)
#=============================================================================
# COMBINE THE IMAGES
#=============================================================================
for surface in surface_list:
combine_pngs(surface, output_dir)
| mit |
bird-house/birdhouse-workshop | tutorials/03_plotter_cli/plotter.py | 1 | 2728 | import matplotlib
# no X11 server ... must be run first
# https://github.com/matplotlib/matplotlib/issues/3466/
matplotlib.use('Agg')
import matplotlib.pylab as plt
# import ccrs for map projections
import cartopy.crs as ccrs
from netCDF4 import Dataset
import os
DATADIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..', 'data')
AIR_DS = os.path.join(DATADIR, 'air.mon.ltm.nc')
def simple_plot(resource, variable=None, timestep=0, output='plot.png'):
"""
Generates a nice and simple plot.
"""
print("Plotting {}, timestep {} ...".format(resource, timestep))
# Create dataset from resource ... a local NetCDF file or a remote OpenDAP URL
ds = Dataset(resource)
# Get the values of the given variable
values = ds.variables[variable]
# Prepare plot with a given size
fig = plt.figure(figsize=(20, 10))
# add projection
ax = plt.axes(projection=ccrs.PlateCarree())
# Render a contour plot for the timestep
plt.contourf(values[timestep, :, :])
# add background image with coastlines
ax.stock_img()
# ax.set_global()
ax.coastlines()
# add a colorbar
plt.colorbar()
# Save the plot to filesystem
fig.savefig(output)
plt.close()
print("Plot written to {}".format(output))
return output
def test_simple_plot():
# raise NotImplementedError("This test is not implemented yet. Help wanted!")
# run default test
output = simple_plot(resource=AIR_DS, variable='air')
assert output == 'plot.png'
# try an invalid variable
try:
simple_plot(resource=AIR_DS, variable='water')
except KeyError:
pass
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Generates a nice and simple plot from a NetCDF file.')
parser.add_argument('dataset', nargs=1, default=AIR_DS,
help='a NetCDF file or an OpenDAP URL')
parser.add_argument('-V', '--variable', nargs='?', default='air',
help='variable to plot (default: air)')
# TODO: add an optional timestep parameter
# parser.add_argument('-t', '--timestep', nargs='?', default=0, type=int,
# help='timestep to plot (default: 0)')
# TODO: add an optional output paramter for the output filename
args = parser.parse_args()
print("dataset={0.dataset}, variable={0.variable}".format(args))
output = simple_plot(resource=args.dataset[0], variable=args.variable)
# TODO: run simple_plot with timestep parameter
# output = simple_plot(resource=args.dataset[0], variable=args.variable,
# timestep=args.timestep)
print("Output: {}".format(output))
| apache-2.0 |
nrz/ylikuutio | external/bullet3/examples/pybullet/examples/rendertest.py | 2 | 4561 | #make sure to compile pybullet with PYBULLET_USE_NUMPY enabled
#otherwise use testrender.py (slower but compatible without numpy)
#you can also use GUI mode, for faster OpenGL rendering (instead of TinyRender CPU)
import os
import sys
import time
import itertools
import subprocess
import numpy as np
import pybullet
from multiprocessing import Process
import pybullet_data
camTargetPos = [0, 0, 0]
cameraUp = [0, 0, 1]
cameraPos = [1, 1, 1]
pitch = -10.0
roll = 0
upAxisIndex = 2
camDistance = 4
pixelWidth = 84 # 320
pixelHeight = 84 # 200
nearPlane = 0.01
farPlane = 100
fov = 60
import matplotlib.pyplot as plt
class BulletSim():
def __init__(self, connection_mode, *argv):
self.connection_mode = connection_mode
self.argv = argv
def __enter__(self):
print("connecting")
optionstring = '--width={} --height={}'.format(pixelWidth, pixelHeight)
optionstring += ' --window_backend=2 --render_device=0'
print(self.connection_mode, optionstring, *self.argv)
cid = pybullet.connect(self.connection_mode, options=optionstring, *self.argv)
pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())
if cid < 0:
raise ValueError
print("connected")
pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_GUI, 0)
pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 0)
pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_DEPTH_BUFFER_PREVIEW, 0)
pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_RGB_BUFFER_PREVIEW, 0)
pybullet.resetSimulation()
pybullet.loadURDF("plane.urdf", [0, 0, -1])
pybullet.loadURDF("r2d2.urdf")
pybullet.loadURDF("duck_vhacd.urdf")
pybullet.setGravity(0, 0, -10)
def __exit__(self, *_, **__):
pybullet.disconnect()
def test(num_runs=300, shadow=1, log=True, plot=False):
if log:
logId = pybullet.startStateLogging(pybullet.STATE_LOGGING_PROFILE_TIMINGS, "renderTimings")
if plot:
plt.ion()
img = np.random.rand(200, 320)
    # img = np.random.standard_normal((50, 100))
image = plt.imshow(img, interpolation='none', animated=True, label="blah")
ax = plt.gca()
times = np.zeros(num_runs)
yaw_gen = itertools.cycle(range(0, 360, 10))
for i, yaw in zip(range(num_runs), yaw_gen):
pybullet.stepSimulation()
start = time.time()
viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch,
roll, upAxisIndex)
aspect = pixelWidth / pixelHeight
projectionMatrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
img_arr = pybullet.getCameraImage(pixelWidth,
pixelHeight,
viewMatrix,
projectionMatrix,
shadow=shadow,
lightDirection=[1, 1, 1],
renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
#renderer=pybullet.ER_TINY_RENDERER)
stop = time.time()
duration = (stop - start)
if (duration):
fps = 1. / duration
#print("fps=",fps)
else:
fps = 0
#print("fps=",fps)
#print("duraction=",duration)
#print("fps=",fps)
times[i] = fps
if plot:
rgb = img_arr[2]
image.set_data(rgb) #np_img_arr)
ax.plot([0])
#plt.draw()
#plt.show()
plt.pause(0.01)
mean_time = float(np.mean(times))
print("mean: {0} for {1} runs".format(mean_time, num_runs))
print("")
if log:
pybullet.stopStateLogging(logId)
return mean_time
if __name__ == "__main__":
res = []
with BulletSim(pybullet.DIRECT):
print("\nTesting DIRECT")
mean_time = test(log=False, plot=True)
res.append(("tiny", mean_time))
with BulletSim(pybullet.DIRECT):
plugin_fn = os.path.join(
pybullet.__file__.split("bullet3")[0],
"bullet3/build/lib.linux-x86_64-3.5/eglRenderer.cpython-35m-x86_64-linux-gnu.so")
plugin = pybullet.loadPlugin(plugin_fn, "_tinyRendererPlugin")
if plugin < 0:
print("\nPlugin Failed to load!\n")
sys.exit()
print("\nTesting DIRECT+OpenGL")
mean_time = test(log=True)
res.append(("plugin", mean_time))
with BulletSim(pybullet.GUI):
print("\nTesting GUI")
mean_time = test(log=False)
res.append(("egl", mean_time))
print()
print("rendertest.py")
print("back nenv fps fps_tot")
for r in res:
print(r[0], "\t", 1, round(r[1]), "\t", round(r[1]))
| agpl-3.0 |
elijah513/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 129 | 10192 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
shusenl/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
smcantab/pele | playground/soft_sphere/soft_sphere_bulk.py | 5 | 6738 | import numpy as np
from pele.systems import BaseSystem
from pele.mindist.periodic_exact_match import ExactMatchPeriodic, MeasurePeriodic
from pele.mindist.periodic_mindist import MinDistBulk
from pele.systems.morse_bulk import put_in_box
from pele.potentials import InversePower
from pele.takestep import RandomDisplacement
from pele.transition_states._interpolate import InterpolateLinearMeasure
class SoftSphereSystem(BaseSystem):
"""Binary Lennard Jones potential with periodic boundary conditions"""
def __init__(self, radii, boxvec, power=2):
super(SoftSphereSystem, self).__init__(self)
self.radii = np.array(radii)
self.natoms = self.radii.size
self.boxvec = np.array(boxvec)
self.periodic = True
self.potential_kwargs = dict()
self.power = power
self.eps = 1.
self.set_params(self.params)
self.params.double_ended_connect.local_connect_params.NEBparams.interpolator = InterpolateLinearMeasure(MeasurePeriodic(self.boxvec))
def set_params(self, params):
params.database.accuracy = 1e-4
params.structural_quench_params.tol = 1e-9
# params.double_ended_connect.local_connect_params.tsSearchParams.iprint = 1
params.double_ended_connect.local_connect_params.tsSearchParams.hessian_diagonalization = True
params.takestep.stepsize = .4
def get_potential(self):
return InversePower(self.power, self.eps, self.radii, boxvec=self.boxvec, **self.potential_kwargs)
def get_random_configuration(self):
x = np.zeros([self.natoms, self.boxvec.size])
for i in range(self.boxvec.size):
x[:, i] = np.random.uniform(-self.boxvec[i] / 2., self.boxvec[i] / 2., self.natoms)
return x.ravel()
def draw(self, coordslinear, index):
from pele.systems._opengl_tools import draw_atomic_binary_polydisperse, draw_box
put_in_box(coordslinear, self.boxvec)
draw_atomic_binary_polydisperse(coordslinear, index, bdim=self.boxvec.size, radii=self.radii)
draw_box(self.boxvec)
def get_permlist(self):
return []
def get_mindist(self):
measure = MeasurePeriodic(self.boxvec, permlist=self.get_permlist())
return MinDistBulk(self.boxvec, measure)
def get_compare_exact(self):
accuracy = 1e-2
measure = MeasurePeriodic(self.boxvec, self.get_permlist())
compare = ExactMatchPeriodic(measure, accuracy=accuracy)
return compare
def get_orthogonalize_to_zero_eigenvectors(self):
# TODO: there are some zero eigenvectors which can be removed
return None
def get_system_properties(self):
return dict(natoms=int(self.natoms),
radii=self.radii,
power=self.power,
eps=self.eps,
boxvec=self.boxvec,
potential="Soft Sphere Bulk",
potential_kwargs=self.potential_kwargs,
)
def get_metric_tensor(self, coords):
return None
def get_takestep(self, **kwargs):
"""return the takestep object for use in basinhopping, etc.
"""
d = dict(self.params.takestep)
d.update(kwargs)
kwargs = d
try:
stepsize = kwargs.pop("stepsize")
except KeyError:
stepsize = 0.6
takeStep = RandomDisplacement(stepsize=stepsize)
return takeStep
def create_soft_sphere_system_from_db(dbname):
from pele.storage import Database
db = Database(dbname, createdb=False)
radii = db.get_property("radii").value()
boxvec = db.get_property("boxvec").value()
power = db.get_property("power").value()
print radii
system = SoftSphereSystem(radii, boxvec, power=power)
db = system.create_database(dbname, createdb=False)
return system, db
#
# testing only below here
#
def rungui(): # pragma: no cover
import os
from pele.gui import run_gui
dbfname = "test24.sqlite"
if os.path.isfile(dbfname):
system, db = create_soft_sphere_system_from_db(dbfname)
else:
natoms = 24
boxl = 3
boxvec = np.ones(3) * boxl
# system = MorseCluster(natoms, rho=1.6047, r0=2.8970, A=0.7102, rcut=9.5)
radii = np.ones(natoms) * .6
radii += np.random.uniform(-1,1,radii.size) * 1e-1
system = SoftSphereSystem(radii, boxvec, power=2.5)
db = system.create_database("test24.sqlite")
run_gui(system, db)
def plot_potential():
from matplotlib import pyplot as plt
natoms = 3
boxl = 10.
boxvec = np.ones(3) * boxl
# system = MorseCluster(natoms, rho=1.6047, r0=2.8970, A=0.7102, rcut=9.5)
radii = np.ones(natoms) * 1.4
system = SoftSphereSystem(radii, boxvec, power=4)
pot = system.get_potential()
rlist = np.linspace(0,1.5,400)
elist = [pot.getEnergy(np.array([0,0.,0.,r,.0,0, 2.5, 0, 0])) for r in rlist]
plt.plot(rlist, elist)
print elist
plt.show()
def test_exact_match():
natoms = 24
boxl = 3
boxvec = np.ones(3) * boxl
# system = MorseCluster(natoms, rho=1.6047, r0=2.8970, A=0.7102, rcut=9.5)
radii = np.ones(natoms) * .6
system = SoftSphereSystem(radii, boxvec, power=2.5)
x1 = np.genfromtxt("coords1")
x2 = np.genfromtxt("coords2")
mindist = system.get_mindist()
match = system.get_compare_exact()
dist = mindist(x1, x2)[0]
print dist
ret = match(x1, x2)
print ret
def test_script():
natoms = 24
boxl = 3
boxvec = np.ones(3) * boxl
# system = MorseCluster(natoms, rho=1.6047, r0=2.8970, A=0.7102, rcut=9.5)
radii = np.ones(natoms) * .6
system = SoftSphereSystem(radii, boxvec, power=2.5)
db = system.create_database()
bh = system.get_basinhopping(db)
bh.run(100)
from pele.landscape import ConnectManager
manager = ConnectManager(db, strategy="random")
for i in xrange(10):
try:
m1, m2 = manager.get_connect_job()
except manager.NoMoreConnectionsError:
break
connect = system.get_double_ended_connect(m1, m2, db)
connect.connect()
def print_info(dbfname="ss60.sqlite"):
system, db = create_soft_sphere_system_from_db(dbfname)
import networkx as nx
from pele.landscape import database2graph
graph = database2graph(db)
cc = nx.connected_components(graph)
c = cc[0]
for i in xrange(3):
print c[i].energy
if __name__ == "__main__":
# plot_potential()
# rungui()
# test_exact_match()
# test_script()
print_info()
| gpl-3.0 |
wedel/TorHS_SpeedDating | torps_hs/plot/comp_rates_times_plot.py | 2 | 39121 | import os
import cPickle as pickle
import sys
import numpy as np
from numpy import ma
# from ggplot import *
# import pandas as pd
import matplotlib
matplotlib.use('PDF') # alerts matplotlib that display not required
import matplotlib.pyplot
from matplotlib import scale as mscale
from matplotlib import transforms as mtransforms
from matplotlib.ticker import FixedFormatter, FixedLocator
import matplotlib.cm as cm
import math
from scipy.interpolate import spline
from itertools import cycle
import pylab
import collections
from collections import Counter
import scipy as sp
import scipy.stats
##### Plotting functions #####
## helper - cumulative fraction for y axis
def cf(d): return np.arange(1.0,float(len(d))+1.0)/float(len(d))
## helper - return step-based CDF x and y values
## only show to the 99th percentile by default
def getcdf(data, shownpercentile=0.99):
data.sort()
frac = cf(data)
print("len(data)", len(data), "len(cf(data)):", len(frac))
x, y, lasty = [0], [0], 0.0
for i in xrange(int(round(len(data)*shownpercentile))):
x.append(data[i])
# y.append(lasty)
# x.append(data[i])
y.append(frac[i])
# lasty = frac[i]
return (x, y)
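## illustrative example (not in the original script): for data = [3.0, 1.0, 2.0]
## the helper sorts in place and returns step coordinates starting at (0, 0),
## i.e. x = [0, 1.0, 2.0, 3.0] and y = [0, 1/3, 2/3, 1.0] when shownpercentile
## covers all three points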
def find_nearest(array, values):
indices = np.abs(np.subtract.outer(array, values)).argmin(0)
return array[indices], indices
def plot_cdf(stats, out_pathname, legend_x):
# color=iter(matplotlib.cm.Set1(np.linspace(0,0.5,len(stats.keys()))))
colors=iter(['red','black','red','black'])
print("len(stats)=%d"%(len(stats)))
x_max = 0
matplotlib.rcParams['xtick.labelsize'] = 20
matplotlib.rcParams['ytick.labelsize'] = 20
if len(stats)==2:
folge = ['Vanilla', 'Gekuerzt'] #['Einfach', 'Botnetz']
linecycler = cycle(["-","--"])
elif len(stats)==4:
folge = ['Vanilla_Einfach', 'Vanilla_Botnetz', 'Gekuerzt_Einfach', 'Gekuerzt_Botnetz']
linecycler = cycle(["-","-","--","--"])
else:
print("error. len(stats)=%d"%(len(stats)))
sys.exit(1)
for line in folge:
# print(line)
try:
data=stats[line]
except:
line_n = [s for s in stats.keys() if line in s][0]
print("%s was found in stats as %s."%(line,line_n))
data = stats[line_n]
# sorted_data=np.sort(data)
# yvals=np.arange(1.0,float(len(sorted_data))+1.0)/float(len(sorted_data))
# print(data)
#yvals=np.arange(len(sorted_data))/float(len(sorted_data))
# # new x values
# xn_ax = np.linspace(sorted_data.min(), sorted_data.max(), 200)
# print(len(xn_ax))
# # new y values
# yn_ax = spline(sorted_data, yvals, xn_ax)
# print(len(yn_ax))
data_max = max(data)
data_shown = filter(lambda x: x < data_max, data)
shown_percentile = float(len(data_shown)) / len(data)
print("shown_percentile:",shown_percentile)
x, y = getcdf(data, shown_percentile)
# print(find_nearest(y, 0.50))
if shown_percentile > 0:
q, i = find_nearest(y, 0.80)
# print(out_pathname, legend_x)
print("%s: data[%d]=%f equals %f."\
%(line,i,y[i],x[i]))
q, i = find_nearest(y, 0.70)
# print(out_pathname, legend_x)
print("%s: data[%d]=%f equals %f."\
%(line,i,y[i],x[i]))
q, i = find_nearest(y, 0.50)
# print(out_pathname, legend_x)
print("%s: data[%d]=%f equals %f."\
%(line,i,y[i],x[i]))
q, i = find_nearest(y, 0.30)
# print(out_pathname, legend_x)
print("%s: data[%d]=%f equals %f."\
%(line,i,y[i],x[i]))
q, i = find_nearest(y, 0.20)
# print(out_pathname, legend_x)
print("%s: data[%d]=%f equals %f."\
%(line,i,y[i],x[i]))
# print(line)
# print(y[i],i)
# print(x[i])
# print(x)
if len(x) != 0 and x_max < max(x):
x_max = max(x)
matplotlib.pyplot.plot(x, y,
color=next(colors),
label = line,
linewidth = 2,
linestyle=next(linecycler))
# matplotlib.pyplot.style.use('ggplot')
# matplotlib.pyplot.figure(figsize=(8,6), dpi=72, facecolor="white")
matplotlib.pyplot.legend(loc='best', fontsize=20) #, fontsize = 'small')
# if legend_x == 'Days from first stream':
# matplotlib.pyplot.xlim(xmin=0.0, xmax=x_max)
# else:
# matplotlib.pyplot.xlim(xmin=0.0)
# matplotlib.pyplot.xscale('symlog')
matplotlib.pyplot.xlim(xmin=0.0, xmax=x_max) # x_max) #0.2)
matplotlib.pyplot.ylim(ymin=0.0)
# matplotlib.pyplot.yscale('log')
# matplotlib.pyplot.yscale('close_to_one')
matplotlib.pyplot.yticks(np.arange(0, 1.2, 0.2))
# matplotlib.pyplot.xticks(np.arange(0, x_max, 0.1))
matplotlib.pyplot.xlabel(legend_x, fontsize=20)
matplotlib.pyplot.ylabel('eCDF', fontsize=20)
# matplotlib.pyplot.title(title, fontsize=fontsize)
# matplotlib.pyplot.grid()
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.savefig(out_pathname)
matplotlib.pyplot.close()
#########################################################################
# Export Data
#########################################################################
a = []
h = []
for line in folge:
h.append(line)
data=stats[line]
y, x = getcdf(data, 1)
a.append(x)
a.append(y)
h = ", ".join(h)
np.savetxt(out_pathname+".dat", np.transpose(a), header=h, fmt="%10s")
#########################################################################
def first_comp_times(start_time, end_time, stats_list):
"""Turns compromise stats into first times of guard/exit/guard&exit compromise."""
time_len = float(end_time - start_time)/float(24*60*60) # Time_len in Days
guard_comp_times = []
middel_comp_times = []
exit_comp_times = []
gm_comp_times = []
ge_comp_times = []
em_comp_times = []
gme_comp_times = []
gm_ge_comp_times = []
# print(stats_list)
for stats in stats_list:
guard_comp_time = time_len
middel_comp_time = time_len
exit_comp_time = time_len
ge_time = time_len
gm_time = time_len
em_time = time_len
gme_time = time_len
gm_ge_comp_time = time_len
if (stats['guard_only_time'] != None):
guard_comp_time = float(stats['guard_only_time'] -\
start_time)/float(24*60*60)
if (stats['exit_only_time'] != None):
exit_comp_time = float(stats['exit_only_time'] -\
start_time)/float(24*60*60)
if (stats['middle_only_time'] != None):
middel_comp_time = float(stats['middle_only_time'] -\
start_time)/float(24*60*60)
if (stats['guard_and_exit_time'] != None):
ge_time = float(stats['guard_and_exit_time'] -\
start_time)/float(24*60*60)
guard_comp_time = min(ge_time, gme_time, gm_time, guard_comp_time)
exit_comp_time = min(exit_comp_time, ge_time, em_time, gme_time)
gm_ge_comp_time = min(gm_time, ge_time, gme_time, gm_ge_comp_time)
if (stats['guard_and_middle_time'] != None):
gm_time = float(stats['guard_and_middle_time'] -\
start_time)/float(24*60*60)
guard_comp_time = min(ge_time, gme_time, gm_time, guard_comp_time)
middel_comp_time = min(middel_comp_time, em_time, gme_time, gm_time)
gm_ge_comp_time = min(gm_time, ge_time, gme_time, gm_ge_comp_time)
if (stats['exit_and_middle_time'] != None):
em_time = float(stats['exit_and_middle_time'] -\
start_time)/float(24*60*60)
middel_comp_time = min(middel_comp_time, em_time, gme_time, gm_time)
exit_comp_time = min(exit_comp_time, ge_time, em_time, gme_time)
if (stats['guard_and_middle_and_exit_time'] != None):
gme_time = float(stats['guard_and_middle_and_exit_time'] -\
start_time)/float(24*60*60)
guard_comp_time = min(ge_time, gme_time, gm_time, guard_comp_time)
exit_comp_time = min(exit_comp_time, ge_time, em_time, gme_time)
middel_comp_time = min(middel_comp_time, em_time, gme_time, gm_time)
gm_ge_comp_time = min(gm_time, ge_time, gme_time, gm_ge_comp_time)
guard_comp_times.append(guard_comp_time)
middel_comp_times.append(middel_comp_time)
exit_comp_times.append(exit_comp_time)
gm_comp_times.append(gm_time)
ge_comp_times.append(ge_time)
em_comp_times.append(em_time)
gme_comp_times.append(gme_time)
gm_ge_comp_times.append(gm_ge_comp_time)
return (guard_comp_times,
middel_comp_times,
exit_comp_times,
gm_comp_times,
ge_comp_times,
em_comp_times,
gme_comp_times,
gm_ge_comp_times)
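
# Illustrative sketch (added): first_comp_times() expects one dict per
# simulated client whose values are Unix timestamps (or None) for each
# compromise event; the key names come from the function above, the numbers
# below are made up.
def _demo_first_comp_times():
    day = 24 * 60 * 60
    stats = {'guard_only_time': 2 * day,
             'middle_only_time': None,
             'exit_only_time': None,
             'guard_and_exit_time': None,
             'guard_and_middle_time': None,
             'exit_and_middle_time': None,
             'guard_and_middle_and_exit_time': None}
    times = first_comp_times(0, 10 * day, [stats])
    print(times[0])  # [2.0] -> the guard was first compromised after two days
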
def comp_set_plot_times(start_times, end_times, compromise_stats,
out_dir, out_name, figsize = None, fontsize = 'small',
legend_locs = {'guard':'lower right', 'exit':'lower right', 'both':'lower right'}):
"""
Plots cdfs of times to compromise for compromised-set statistics.
Input:
start_times: timestamps of simulation starts for each dataset
end_times: timestamps of simulation ends for each dataset
compromise_stats: (list) each element is a list of statistics
calculated for compromised set
out_dir: output directory
out_name: string to comprise part of output filenames
"""
stats_guard_comp_times = {}
stats_middel_comp_times = {}
stats_exit_comp_times = {}
stats_gm_comp_times = {}
stats_ge_comp_times = {}
stats_em_comp_times = {}
stats_gme_comp_times = {}
stats_gm_ge_comp_times = {}
stats_client_gm_loc_times = {}
stats_client_g_loc_times = {}
stats_client_guard_dec_times = {}
print(compromise_stats.keys())
print(start_times)
print(end_times)
for key in sorted(compromise_stats.keys()):
start_time = start_times[key]
end_time = end_times[key]
(guard_comp_times,
middel_comp_times,
exit_comp_times,
gm_comp_times,
ge_comp_times,
em_comp_times,
gme_comp_times,
gm_ge_comp_times) = first_comp_times(start_time, end_time,
compromise_stats[key])
stats_guard_comp_times[key] = guard_comp_times
stats_middel_comp_times[key] = middel_comp_times
stats_exit_comp_times[key] = exit_comp_times
stats_gm_comp_times[key] = gm_comp_times
stats_ge_comp_times[key] = ge_comp_times
stats_em_comp_times[key] = em_comp_times
stats_gme_comp_times[key] = gme_comp_times
stats_gm_ge_comp_times[key] = gm_ge_comp_times
# GuardMiddleAttac equals stats_client_gm_loc_times
# GuardAttac equals stats_client_g_loc_times
if key.startswith('Vanilla'):
print('%s stats with Vanilla'%(key))
stats_client_gm_loc_times[key] = gm_ge_comp_times
stats_client_g_loc_times[key] = guard_comp_times
stats_client_guard_dec_times[key] = middel_comp_times
elif key.startswith('Gekuerzt'):
print('%s stats with Gekuerzt'%(key))
stats_client_gm_loc_times[key] = guard_comp_times
stats_client_g_loc_times[key] = guard_comp_times
stats_client_guard_dec_times[key] = exit_comp_times
else:
            print('Error: %s is not formatted as expected'%(key))
# cdf of all bad
print('Will plot guard_comp_times')
# print(stats_frac_rp_located.keys())
out_filename = out_name + 'guard_comp_times.pdf'
out_pathname = os.path.join(out_dir, out_filename)
plot_cdf(stats_guard_comp_times, out_pathname, 'Tage ab dem ersten Rendezvous Circuit')
print('Will plot client_guard_dec_times')
# print(stats_frac_client_located.keys())
out_filename = out_name + 'client_guard_dec_times.pdf'
out_pathname = os.path.join(out_dir, out_filename)
plot_cdf(stats_client_guard_dec_times, out_pathname, 'Tage ab dem ersten Rendezvous Circuit')
# print('Will plot middel_comp_times')
# # print(stats_frac_hs_located.keys())
# out_filename = out_name + 'middel_comp_times.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_middel_comp_times, out_pathname, 'Tage ab dem ersten Rendezvous Circuit')
#
# print('Will plot exit_comp_times')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'exit_comp_times.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_exit_comp_times, out_pathname, 'Tage ab dem ersten Rendezvous Circuit')
#
# print('Will plot gm_comp_times')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'gm_comp_times.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_gm_comp_times, out_pathname, 'Tage ab dem ersten Rendezvous Circuit')
#
# print('Will plot ge_comp_times')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'ge_comp_times.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_ge_comp_times, out_pathname, 'Tage ab dem ersten Rendezvous Circuit')
#
# print('Will plot em_comp_times')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'em_comp_times.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_em_comp_times, out_pathname, 'Tage ab dem ersten Rendezvous Circuit')
#
# print('Will plot gme_comp_times')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'gme_comp_times.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_gme_comp_times, out_pathname, 'Tage ab dem ersten Rendezvous Circuit')
#
# print('Will plot gm_ge_comp_times')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'gm_ge_comp_times.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_gm_ge_comp_times, out_pathname, 'Tage ab dem ersten Rendezvous Circuit')
#
# print('Will plot client_gm_loc_times')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'client_loc_gm_times.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_client_gm_loc_times, out_pathname, 'Tage ab dem ersten Rendezvous Circuit')
#
# print('Will plot client_g_loc_times')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'client_loc_g_times.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_client_g_loc_times, out_pathname, 'Tage ab dem ersten Rendezvous Circuit')
def sliding_mean(data_array, window=5):
data_array = data_array
new_list = []
for i in range(len(data_array)):
indices = range(max(i - window + 1, 0),
min(i + window + 1, len(data_array)))
avg = 0
for j in indices:
avg += data_array[j]
avg /= float(len(indices))
new_list.append(avg)
return new_list
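
# Illustrative sketch (added): sliding_mean() smooths a series by averaging a
# short window of neighbouring samples around each point, clipped at the ends.
# The numbers below are made up.
def _demo_sliding_mean():
    print(sliding_mean([1.0, 2.0, 3.0, 4.0], window=2))
    # -> [2.0, 2.5, 3.0, 3.5]
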
def mean_confidence_interval(amount_datapoint, ammount_all, confidence=0.95):
a = np.append(1.0*np.array(np.ones(amount_datapoint)),1.0*np.array(np.zeros(ammount_all-amount_datapoint)))
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
    h = se * sp.stats.t.ppf((1 + confidence) / 2., n - 1)
return m, m-h, m+h
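
# Illustrative sketch (added): mean_confidence_interval() treats
# `amount_datapoint` successes out of `ammount_all` trials as a 0/1 sample and
# returns (mean, lower, upper) of a Student-t confidence interval around the
# observed fraction. The numbers below are made up.
def _demo_mean_confidence_interval():
    m, lower, upper = mean_confidence_interval(20, 100, confidence=0.95)
    print(m)                             # 0.2
    print("%f %f" % (lower, upper))      # roughly 0.12 and 0.28
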
# def sliding_mean(data_array, dates, day=1):
#
# y = len(dates[0:dates.index(float(day))])
# print("y=%d. dates[0:y]=%s. dates[y+1:y*2]=%s."%(y, dates[0:y], dates[y+1:y*2]))
# new_list = [data_array[x:x+y] for x in range(0, len(data_array),y)]
# print("len(Data_array)=%d, len(new_list)=%d."%\
# (len(data_array), len(new_list)))
# indices = range(max(i - window + 1, 0),
# min(i + window + 1, len(data_array)))
# avg = 0
# for j in indices:
# avg += data_array[j]
# avg /= float(len(indices))
# new_list.append(avg)
#
# return new_list
def loc_at_t_plot(compromise_stats, ammount_sims, out_dir, out_name):
# stats_client_loc_on_times = {}
# for key in sorted(compromise_stats.keys()):
# # key = dirname
# # compromise_stats[key] = Array of all simulated Clients
# # for stats in compromise_stats[key]:
# # # stats = {time:Compromised(Bool), time:Compromised(Bool), ...}
# # pass
# # {k: sum(d[k] for d in dict1) for k in dict1[0]}
#
# #Absolut:
# # stats_client_loc_on_times[key] = {t:sum(d[t] for d in compromise_stats[key]) for t in compromise_stats[key][0]}
# #Mean:
# stats_client_loc_on_times[key] =\
# {(float(t)/float(24*60*60)):(float(sum(d[t])/len(compromise_stats[key])) \
# for d in compromise_stats[key]) for t in compromise_stats[key][0]}
print("loc_at_t_plot(): Will divide values by %d."%ammount_sims)
stats_client_loc_on_times = compromise_stats
# print('Will plot client_anteil')
# out_filename = out_name + 'client_anteil.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# tmp_dir = {}
# for key in stats_client_loc_on_times:
# print("Anzahl Values for %s: %d"%(key, len(stats_client_loc_on_times[key].values())))
# if len(stats_client_loc_on_times[key].values()) > 0:
# tmp_dir[key] = [(float(v)/ammount_sims) for v in stats_client_loc_on_times[key].values()]
# # dire = {k:v.values() for k,v in stats_client_loc_on_times}
# plot_cdf(tmp_dir, out_pathname, 'Anteil der Clients')
print('Will plot client_loc_at_times')
out_filename = out_name + 'client_loc_at_times.pdf'
out_pathname = os.path.join(out_dir, out_filename)
# print(len(stats_client_loc_on_times['Gekuerzt_Botnetz']))
# print(stats_client_loc_on_times['Gekuerzt_Botnetz'][0:5])
# for i in range(10):
# print(stats_client_loc_on_times['Gekuerzt_Botnetz'][i]/float(24*60*60))
# print(stats_client_loc_on_times['Gekuerzt_Botnetz'][0:5])
max_time = float(max(stats_client_loc_on_times['Vanilla_Einfach'].keys()))/float(24*60*60)
min_time = float(min(stats_client_loc_on_times['Vanilla_Einfach'].keys()))/float(24*60*60)
print(min_time, max_time)
print(stats_client_loc_on_times['Vanilla_Einfach'].keys()[0:5])
# color=iter(matplotlib.cm.Set1(np.linspace(0,0.5,len(stats_client_loc_on_times.keys()))))
colors=iter(['red','black','red','black'])
# lines = ["-","-","--","--"]
# linecycler = cycle(lines)
if len(stats_client_loc_on_times)==2:
folge = ['Vanilla', 'Gekuerzt']
linecycler = cycle(["-","--"])
elif len(stats_client_loc_on_times)==4:
folge = ['Vanilla_Einfach', 'Vanilla_Botnetz', 'Gekuerzt_Einfach', 'Gekuerzt_Botnetz']
linecycler = cycle(["-","-","--","--"])
else:
print("error. len(stats)=%d"%(len(stats)))
sys.exit(1)
for key in folge:
try:
data=stats_client_loc_on_times[key]
except:
line_n = [s for s in stats_client_loc_on_times.keys() if key in s][0]
print("%s was found in stats as %s."%(key,line_n))
data = stats_client_loc_on_times[line_n]
if len((data.items())) == 0:
continue
print(key)
print(len(data.items()))
# print(type(data))
# print(data.items()[0])
#mean
# data = {(float(t)/float(24*60*60)):(float(v)/ammount_sims) for t,v in data.items()}
# data = collections.OrderedDict(sorted(data.items()))
print(mean_confidence_interval(data.values()[0], ammount_sims,0.75))
data = {(float(t)/float(24*60*60)):float(mean_confidence_interval(v,ammount_sims)[0]) for t,v in data.items()}
data = collections.OrderedDict(sorted(data.items()))
# data_y = sliding_mean(data.values(), data.keys())
dates = data.keys()
y = len(dates[0:dates.index(float(1))])
# print("y=%d. dates[0:y]=%s. dates[y+1:y*2]=%s."%(y, dates[0:y], dates[y+1:y*2]))
data_y = sliding_mean(data.values(),y)
if key == 'Gekuerzt_Botnetz':
data_bot = data_y
elif key == 'Gekuerzt_Einfach':
data_easy = data_y
# data_sem_max = {(float(t)/float(24*60*60)):float(mean_confidence_interval(v,ammount_sims,0.75)[2]) for t,v in data.items()}
# data_sem_max = collections.OrderedDict(sorted(data.items()))
# # data_sem_max_y = sliding_mean(data_sem_max.values(),y)
# data_sem_min = {(float(t)/float(24*60*60)):float(mean_confidence_interval(v,ammount_sims,0.75)[1]) for t,v in data.items()}
# data_sem_min = collections.OrderedDict(sorted(data.items()))
# data_sem_min_y = sliding_mean(data_sem_min.values(),y)
# data_sem = {float(t):((float(v)/float(math.sqrt(ammount_sims)))) for t,v in data.items()}
# data_sem = collections.OrderedDict(sorted(data.items()))
# data_y_sem = sliding_mean(data_sem.values())
# data_y_sem_min = [min(y-s,0) for y,s in zip(data_y, data_y_sem)]
# data_y_sem_max = [y+s for y,s in zip(data_y, data_y_sem)]
#Absolut
#data= (stats_client_loc_on_times[key]
col=next(colors)
# matplotlib.pyplot.fill_between(data_sem_min.keys(), data_sem_min.values(),
# data_sem_max.values(), color=col, alpha=0.5)
matplotlib.pyplot.plot(data.keys(), data_y,
color=col,
label = key,
linewidth = 2,
linestyle=next(linecycler))
# matplotlib.pyplot.line(data.keys(), data.values(),
# color=next(color),
# label = key,
# linewidth = 1,
# linestyle=next(linecycler))
# matplotlib.pyplot.scatter(data.keys(), data.values(),
# color=next(color),
# label = key,
# s=1,
# alpha=0.1)
# linewidth = 1,
# linestyle=next(linecycler))
# matplotlib.pyplot.hist(data.values(), bins=len(data.keys())),
# histtype='step',
# stacked=True,
# fill=True)
# matplotlib.pyplot.xcorr(data.keys(), data.values(),
# # s=area,
# # c=next(color),
# label=key)
# # alpha=0.5)
# print("len data_bot: %d"%(len(data_bot)))
# diff_array = np.array([float(a)-float(b) for a,b in zip(data_bot,data_easy)])
# # print(diff_array)
# max_diff = float(max(diff_array.max(),0.0))
# min_diff = float(max(diff_array.min(),0.0))
#
# print("Max-Diff: %f. Min-Diff: %f."%(max_diff, min_diff))
matplotlib.rcParams['xtick.labelsize'] = 18
matplotlib.rcParams['ytick.labelsize'] = 18
matplotlib.pyplot.legend(loc='best', fontsize=20) #, fontsize = 'small')
matplotlib.pyplot.xlim(xmin=min_time, xmax=max_time)
matplotlib.pyplot.ylim(ymin=0.0)#,ymax=0.6)
# matplotlib.pyplot.yticks(np.arange(0, 1.1, 0.1))
# matplotlib.pyplot.yticks(np.arange(0, 0.61, 0.1))
matplotlib.pyplot.xlabel('Tage seit die Relays des Angreifers im Netzwerk sind', fontsize=20)
# matplotlib.pyplot.ylabel('Mean Anteil Kompromittierter Clients', fontsize=20)
matplotlib.pyplot.ylabel('Wahrscheinlichkeit', fontsize=20)
# matplotlib.pyplot.title(title, fontsize=fontsize)
# matplotlib.pyplot.grid()
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.savefig(out_pathname)
matplotlib.pyplot.close()
def relay_comp_plot_cdf(compromise_stats, out_dir, out_name, figsize = None, fontsize = 'small',
legend_locs = {'guard':'lower right', 'exit':'lower right', 'both':'lower right'}):
"""
Plots cdfs of compromise fractions for compromised-set statistics.
Input:
compromise_stats: (list) each element is a list of statistics
calculated for the compromised set
line_labels: (list) each element is a line label or None if only
one line to be plotted
out_dir: directory for output files
out_name: identifying string to be incorporated in filenames
"""
stats_frac_guard_comp = {}
stats_frac_middle_comp = {}
stats_frac_exit_comp = {}
stats_gm_comp = {}
stats_ge_comp = {}
stats_em_comp = {}
stats_gme_comp = {}
stats_gm_ge_comp = {}
stats_client_g_loc = {}
stats_client_gm_loc = {}
stats_client_guard_desc = {}
# print(compromise_stats.keys())
# print(len(compromise_stats.keys()))
for key in sorted(compromise_stats.keys()):
frac_guard_comp = []
frac_middle_comp = []
frac_exit_comp =[]
frac_gm_comp = []
frac_ge_comp = []
frac_em_comp = []
frac_gme_comp = []
frac_gm_ge_comp = []
print(len(compromise_stats[key]))
# print(len(compromise_stats[key]))
for stats in compromise_stats[key]:
# print "len(stats)=",len(stats)
# print(stats)
tot_ct = float(stats['guard_only_bad'] +
stats['middle_only_bad'] +
stats['exit_only_bad'] +
stats['guard_and_exit_bad'] +
stats['guard_and_middle_bad'] +
stats['exit_and_middle_bad'] +
stats['guard_and_middle_and_exit_bad'] +
stats['good'])
# print(tot_ct)
frac_guard_comp.append(\
float(stats['guard_only_bad'] +
stats['guard_and_exit_bad'] +
stats['guard_and_middle_bad'] +
stats['guard_and_middle_and_exit_bad']) / float(tot_ct))
frac_middle_comp.append(\
float(stats['middle_only_bad'] +
stats['guard_and_middle_bad'] +
stats['exit_and_middle_bad'] +
stats['guard_and_middle_and_exit_bad']) / float(tot_ct))
frac_exit_comp.append(\
float(stats['exit_only_bad'] +
stats['guard_and_exit_bad'] +
stats['exit_and_middle_bad'] +
stats['guard_and_middle_and_exit_bad']) / float(tot_ct))
frac_gm_comp.append(\
float(stats['guard_and_middle_bad'] +
stats['guard_and_middle_and_exit_bad']) / float(tot_ct))
frac_ge_comp.append(\
float(stats['guard_and_exit_bad'] +
stats['guard_and_middle_and_exit_bad']) / float(tot_ct))
frac_em_comp.append(\
float(stats['exit_and_middle_bad'] +
stats['guard_and_middle_and_exit_bad']) / float(tot_ct))
frac_gme_comp.append(\
float(stats['guard_and_middle_and_exit_bad']) / float(tot_ct))
frac_gm_ge_comp.append(\
float(stats['guard_and_exit_bad'] +
stats['guard_and_middle_bad'] +
stats['guard_and_middle_and_exit_bad']) / float(tot_ct))
# client_anteil_loc = [frac_guard_comp.sort()]
stats_frac_guard_comp[key] = frac_guard_comp
stats_frac_middle_comp[key] = frac_middle_comp
stats_frac_exit_comp[key] = frac_exit_comp
stats_gm_comp[key] = frac_gm_comp
stats_ge_comp[key] = frac_ge_comp
stats_em_comp[key] = frac_em_comp
stats_gme_comp[key] = frac_gme_comp
stats_gm_ge_comp[key] = frac_gm_ge_comp
# GuardMiddleAttac equals stats_client_gm_loc_times
# GuardAttac equals stats_client_g_loc_times
if key.startswith('Vanilla'):
print('%s stats with Vanilla'%(key))
stats_client_gm_loc[key] = frac_gm_ge_comp
stats_client_g_loc[key] = frac_guard_comp
stats_client_guard_desc[key] = frac_middle_comp
elif key.startswith('Gekuerzt'):
print('%s stats with Gekuerzt'%(key))
stats_client_gm_loc[key] = frac_guard_comp
stats_client_g_loc[key] = frac_guard_comp
stats_client_guard_desc[key] = frac_exit_comp
else:
            print('Error: %s is not formatted as expected'%(key))
# if key.startswith('Vanilla'):
# print('%s stats with vanilla'%(key))
# if key.endswith('MiddleAdv'):
# print('%s ends with MiddleAdv'%(key))
# stats_client_loc[key] = frac_gm_ge_comp
# stats_client_guard_desc[key] =
# elif key.endswith('GuardAdv'):
# print('%s ends with GuardAdv'%(key))
# stats_client_loc[key] = frac_guard_comp
# elif key.startswith('Gekuerzt'):
# print('%s stats with short'%(key))
# stats_client_loc[key] = frac_guard_comp
# else:
# print('Error: %s isnt as expected'%(key))
print('Will plot guard_comp')
print(stats_frac_guard_comp.keys())
out_filename = out_name + 'guard_comp.pdf'
out_pathname = os.path.join(out_dir, out_filename)
plot_cdf(stats_frac_guard_comp, out_pathname, 'Anteil der Rendezvous Circuits')
print('Will plot client_guard_desc')
# print(stats_frac_client_located.keys())
out_filename = out_name + 'client_guard_desc.pdf'
out_pathname = os.path.join(out_dir, out_filename)
plot_cdf(stats_client_guard_desc, out_pathname, 'Anteil der Rendezvous Circuits')
# print('Will plot middle_comp')
# print(stats_frac_middle_comp.keys())
# out_filename = out_name + 'middle_comp.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_frac_middle_comp, out_pathname, 'Anteil der Rendezvous Circuits')
#
# print('Will plot exit_comp')
# print(stats_frac_exit_comp.keys())
# out_filename = out_name + 'exit_comp.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_frac_exit_comp, out_pathname, 'Anteil der Rendezvous Circuits')
#
# print('Will plot gm_comp')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'gm_comp.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_gm_comp, out_pathname, 'Anteil der Rendezvous Circuits')
#
# print('Will plot ge_comp')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'ge_comp.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_ge_comp, out_pathname, 'Anteil der Rendezvous Circuits')
#
# print('Will plot em_comp')
# # print(stats_frac_client_located.
# stats_frac_guard_comp[key] = frac_guard_comp
# stats_frac_middle_comp[key] = frac_middle_comp
# stats_frac_exit_comp[key] = frac_exit_comp
# stats_gm_comp[key] = frac_gm_comp
# stats_ge_comp[key] = frac_ge_comp
# stats_em_comp[key] = frac_em_comp
# stats_gme_comp[key] = frac_gme_comp
# stats_gm_ge_comp[key] = frac_gm_ge_comp
#
# # GuardMiddleAttac equals stats_client_gm_loc_timeskeys())
# out_filename = out_name + 'em_comp.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_em_comp, out_pathname, 'Anteil der Rendezvous Circuits')
#
# print('Will plot gme_comp')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'gme_comp.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_gme_comp, out_pathname, 'Anteil der Rendezvous Circuits')
#
# print('Will plot gm_ge_comp')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'gm_ge_comp.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_gm_ge_comp, out_pathname, 'Anteil der Rendezvous Circuits')
#
# print('Will plot client_gm_loc')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'client_gm_loc.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_client_gm_loc, out_pathname, 'Anteil der Rendezvous Circuits')
#
# print('Will plot client_g_loc')
# # print(stats_frac_client_located.keys())
# out_filename = out_name + 'client_g_loc.pdf'
# out_pathname = os.path.join(out_dir, out_filename)
# plot_cdf(stats_client_g_loc, out_pathname, 'Anteil der Rendezvous Circuits')
def read_analysis_files(pathnames):
"""Reads in simulation analysis files (as produced by pathsim_analysis.py).
    Returns the earliest start time, the latest end time, and the aggregated statistics."""
client_compromise_stats = []
hs_compromise_stats = []
location_stats = []
loc_at_t_stats = {}
guardloc_at_t_stats = {}
start_time = None
end_time = None
for pathname in pathnames:
with open(str(pathname), 'rb') as f:
new_start_time = pickle.load(f)
new_end_time = pickle.load(f)
new_client_compromise_stats = pickle.load(f)
new_hs_compromise_stats = pickle.load(f)
new_location_stats = pickle.load(f)
try:
new_loc_at_t_stats = pickle.load(f)
new_loc_at_t_stats = collections.OrderedDict(sorted(new_loc_at_t_stats.items()))
except:
new_loc_at_t_stats = None
try:
new_guardloc_at_t_stats = pickle.load(f)
new_guardloc_at_t_stats = collections.OrderedDict(sorted(new_guardloc_at_t_stats.items()))
except:
new_guardloc_at_t_stats = None
# print(new_loc_at_t_stats.keys()[0:5])
# for i in range(5):
# print(new_loc_at_t_stats.keys()[i]/float(24*60*60))
# print("new_client_compromise_stats:", new_client_compromise_stats[0],'\n')
# print("new_hs_compromise_stats:", new_hs_compromise_stats[0], '\n')
if (new_client_compromise_stats == new_hs_compromise_stats):
raise ValueError('Alert: new_client_compromise_stats equals new_hs_compromise_stats')
# if (new_start_time == new_end_time):
# raise ValueError('Alert: new_client_compromise_stats equals new_hs_compromise_stats')
# print(new_start_time)
if (start_time == None):
start_time = new_start_time
else:
start_time = min(start_time, new_start_time)
if (end_time == None):
end_time = new_end_time
else:
end_time = max(end_time, new_end_time)
client_compromise_stats.extend(new_client_compromise_stats)
hs_compromise_stats.extend(new_hs_compromise_stats)
location_stats.extend(new_location_stats)
if new_loc_at_t_stats is not None:
loc_at_t_stats = Counter(loc_at_t_stats) + Counter(new_loc_at_t_stats)
loc_at_t_stats = collections.OrderedDict(sorted(loc_at_t_stats.items()))
if new_guardloc_at_t_stats is not None:
guardloc_at_t_stats = Counter(guardloc_at_t_stats) + Counter(new_guardloc_at_t_stats)
guardloc_at_t_stats = collections.OrderedDict(sorted(guardloc_at_t_stats.items()))
if (client_compromise_stats == hs_compromise_stats):
raise ValueError('Alert: client_compromise_stats equals hs_compromise_stats')
return (start_time, end_time, client_compromise_stats, hs_compromise_stats, location_stats, loc_at_t_stats, guardloc_at_t_stats)
def compromised_set_plot(pathnames_list, line_labels, out_dir, out_name,
figsize = None, fontsize = 'small',
time_legend_locs = {'guard':'lower right', 'exit':'lower right', 'both':'lower right'},
rate_legend_locs = {'guard':'lower right', 'exit':'lower right', 'both':'lower right'}):
"""Plots cdfs for compromised-set statistics."""
if (line_labels == None): # assume pathnames given as flat list
pathnames_list = [pathnames_list]
# aggregate the stats
client_compromise_stats = {}
hs_compromise_stats = {}
location_stats = {}
loc_at_t_stats = {}
guardloc_at_t_stats = {}
start_times = {}
end_times = {}
ammount_sims = 0
for pathnames in pathnames_list:
print(pathnames)
dirnames = os.path.dirname(pathnames[0]).split('/')[-1]
print(dirnames)
(start_times[dirnames],
end_times[dirnames],
client_compromise_stats[dirnames],
hs_compromise_stats[dirnames],
location_stats[dirnames],
loc_at_t_stats[dirnames],
guardloc_at_t_stats[dirnames]) = read_analysis_files(pathnames)
ammount_sims = max(ammount_sims, len(client_compromise_stats[dirnames]))
# if any(loc_at_t_stats.values()):
# loc_at_t_plot(loc_at_t_stats, ammount_sims, out_dir, (out_name+'_Location_'))
# if any(guardloc_at_t_stats.values()):
# loc_at_t_plot(guardloc_at_t_stats, ammount_sims, out_dir, (out_name+'Guard_Location_'))
comp_set_plot_times(start_times, end_times, client_compromise_stats, out_dir, (out_name+'client_'))
# comp_set_plot_times(start_times, end_times, hs_compromise_stats, out_dir, (out_name+'hs_'))
relay_comp_plot_cdf(client_compromise_stats, out_dir, (out_name+'client_'))
# relay_comp_plot_cdf(hs_compromise_stats, out_dir, (out_name+'hs_'))
if __name__ == '__main__':
    usage = 'Usage: comp_rates_times_plot.py [in_dir] [out_dir] [out_name]: \nTakes \
all files in in_dir, plots their contents according to type, and outputs the results to \
out_dir. out_name is optional; if omitted, it is \
assumed that input filenames are of form x.y.z, and output files will use x for out_name.'
if (len(sys.argv) < 3):
print(usage)
sys.exit(1)
in_dir = sys.argv[1]
out_dir = sys.argv[2]
if (len(sys.argv) > 3):
out_name = sys.argv[3]
else:
out_name = ''
line_labels = None
pathnames = []
for dirpath, dirnames, fnames in os.walk(in_dir):
tmp_pathnames = []
if len(dirnames) != 0:
line_labels = dirnames
# print(line_labels)
line_labels = sorted(line_labels)
# print(line_labels)
else:
if len(fnames) != 0:
for fname in fnames:
# print(fname)
tmp_pathnames.append(os.path.join(dirpath,fname))
tmp_pathnames = sorted(tmp_pathnames)
if line_labels == None:
line_labels = os.path.dirname(dirpath).split('/')[-1]
line_labels = sorted(line_labels)
pathnames.append(tmp_pathnames)
pathnames.sort()
compromised_set_plot(pathnames, line_labels, out_dir, out_name)
| gpl-2.0 |
uhjish/seaborn | seaborn/tests/test_rcmod.py | 11 | 7231 | import numpy as np
import matplotlib as mpl
from distutils.version import LooseVersion
import nose
import matplotlib.pyplot as plt
import nose.tools as nt
import numpy.testing as npt
from .. import rcmod
class RCParamTester(object):
def flatten_list(self, orig_list):
iter_list = map(np.atleast_1d, orig_list)
flat_list = [item for sublist in iter_list for item in sublist]
return flat_list
def assert_rc_params(self, params):
for k, v in params.items():
if k == "svg.embed_char_paths":
# This param causes test issues and is deprecated anyway
continue
elif isinstance(v, np.ndarray):
npt.assert_array_equal(mpl.rcParams[k], v)
else:
nt.assert_equal((k, mpl.rcParams[k]), (k, v))
class TestAxesStyle(RCParamTester):
styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"]
def test_default_return(self):
current = rcmod.axes_style()
self.assert_rc_params(current)
def test_key_usage(self):
_style_keys = set(rcmod._style_keys)
for style in self.styles:
nt.assert_true(not set(rcmod.axes_style(style)) ^ _style_keys)
def test_bad_style(self):
with nt.assert_raises(ValueError):
rcmod.axes_style("i_am_not_a_style")
def test_rc_override(self):
rc = {"axes.facecolor": "blue", "foo.notaparam": "bar"}
out = rcmod.axes_style("darkgrid", rc)
nt.assert_equal(out["axes.facecolor"], "blue")
nt.assert_not_in("foo.notaparam", out)
def test_set_style(self):
for style in self.styles:
style_dict = rcmod.axes_style(style)
rcmod.set_style(style)
self.assert_rc_params(style_dict)
def test_style_context_manager(self):
rcmod.set_style("darkgrid")
orig_params = rcmod.axes_style()
with rcmod.axes_style("whitegrid"):
context_params = rcmod.axes_style("whitegrid")
self.assert_rc_params(context_params)
self.assert_rc_params(orig_params)
def test_style_context_independence(self):
nt.assert_true(set(rcmod._style_keys) ^ set(rcmod._context_keys))
def test_set_rc(self):
rcmod.set(rc={"lines.linewidth": 4})
nt.assert_equal(mpl.rcParams["lines.linewidth"], 4)
rcmod.set()
def test_reset_defaults(self):
# Changes to the rc parameters make this test hard to manage
# on older versions of matplotlib, so we'll skip it
if LooseVersion(mpl.__version__) < LooseVersion("1.3"):
raise nose.SkipTest
rcmod.reset_defaults()
self.assert_rc_params(mpl.rcParamsDefault)
rcmod.set()
def test_reset_orig(self):
# Changes to the rc parameters make this test hard to manage
# on older versions of matplotlib, so we'll skip it
if LooseVersion(mpl.__version__) < LooseVersion("1.3"):
raise nose.SkipTest
rcmod.reset_orig()
self.assert_rc_params(mpl.rcParamsOrig)
rcmod.set()
class TestPlottingContext(RCParamTester):
contexts = ["paper", "notebook", "talk", "poster"]
def test_default_return(self):
current = rcmod.plotting_context()
self.assert_rc_params(current)
def test_key_usage(self):
_context_keys = set(rcmod._context_keys)
for context in self.contexts:
missing = set(rcmod.plotting_context(context)) ^ _context_keys
nt.assert_true(not missing)
def test_bad_context(self):
with nt.assert_raises(ValueError):
rcmod.plotting_context("i_am_not_a_context")
def test_font_scale(self):
notebook_ref = rcmod.plotting_context("notebook")
notebook_big = rcmod.plotting_context("notebook", 2)
font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize",
"xtick.labelsize", "ytick.labelsize", "font.size"]
for k in font_keys:
nt.assert_equal(notebook_ref[k] * 2, notebook_big[k])
def test_rc_override(self):
key, val = "grid.linewidth", 5
rc = {key: val, "foo": "bar"}
out = rcmod.plotting_context("talk", rc=rc)
nt.assert_equal(out[key], val)
nt.assert_not_in("foo", out)
def test_set_context(self):
for context in self.contexts:
context_dict = rcmod.plotting_context(context)
rcmod.set_context(context)
self.assert_rc_params(context_dict)
def test_context_context_manager(self):
rcmod.set_context("notebook")
orig_params = rcmod.plotting_context()
with rcmod.plotting_context("paper"):
context_params = rcmod.plotting_context("paper")
self.assert_rc_params(context_params)
self.assert_rc_params(orig_params)
class TestFonts(object):
def test_set_font(self):
rcmod.set(font="Verdana")
_, ax = plt.subplots()
ax.set_xlabel("foo")
try:
nt.assert_equal(ax.xaxis.label.get_fontname(),
"Verdana")
except AssertionError:
if has_verdana():
raise
else:
raise nose.SkipTest("Verdana font is not present")
finally:
rcmod.set()
plt.close("all")
def test_set_serif_font(self):
rcmod.set(font="serif")
_, ax = plt.subplots()
ax.set_xlabel("foo")
nt.assert_in(ax.xaxis.label.get_fontname(),
mpl.rcParams["font.serif"])
rcmod.set()
plt.close("all")
def test_different_sans_serif(self):
if LooseVersion(mpl.__version__) < LooseVersion("1.4"):
raise nose.SkipTest
rcmod.set()
rcmod.set_style(rc={"font.sans-serif":
["Verdana"]})
_, ax = plt.subplots()
ax.set_xlabel("foo")
try:
nt.assert_equal(ax.xaxis.label.get_fontname(),
"Verdana")
except AssertionError:
if has_verdana():
raise
else:
raise nose.SkipTest("Verdana font is not present")
finally:
rcmod.set()
plt.close("all")
def has_verdana():
"""Helper to verify if Verdana font is present"""
    # This import is relatively expensive, so import font_manager here
    # rather than at module level, to avoid paying that cost for tests
    # in this module that do not need it
import matplotlib.font_manager as mplfm
try:
verdana_font = mplfm.findfont('Verdana', fallback_to_default=False)
except:
# if https://github.com/matplotlib/matplotlib/pull/3435
# gets accepted
return False
# otherwise check if not matching the logic for a 'default' one
try:
unlikely_font = mplfm.findfont("very_unlikely_to_exist1234",
fallback_to_default=False)
except:
# if matched verdana but not unlikely, Verdana must exist
return True
# otherwise -- if they match, must be the same default
return verdana_font != unlikely_font
| bsd-3-clause |
RayMick/scikit-learn | sklearn/discriminant_analysis.py | 32 | 27308 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
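
# Illustrative sketch (added here for exposition; not part of scikit-learn):
# the three shrinkage modes accepted by _cov() on a small random sample. The
# helper name and the data are made up.
def _demo_cov_shrinkage():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    s_emp = _cov(X)            # no shrinkage: plain empirical covariance
    s_auto = _cov(X, 'auto')   # Ledoit-Wolf shrinkage
    s_half = _cov(X, 0.5)      # fixed shrinkage towards a scaled identity
    print(s_emp.shape, s_auto.shape, s_half.shape)  # (3, 3) for all three
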
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by with classes std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=None, tol=None):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("The parameter 'store_covariance' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariance = store_covariance
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y, store_covariances=None, tol=None):
"""Fit the model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
if store_covariances:
warnings.warn("The parameter 'store_covariances' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariances = store_covariances
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
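# Hedged usage sketch (not part of the original module; the public class name
# and import path below are assumptions about how this estimator is exposed):
#
#     from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
#     clf = QuadraticDiscriminantAnalysis(reg_param=0.1).fit(X_train, y_train)
#     labels = clf.predict(X_test)          # hard class assignments
#     probs = clf.predict_proba(X_test)     # rows sum to 1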
| bsd-3-clause |
capitalk/treelearn | treelearn/viterbi_tree.py | 1 | 3128 | # TreeLearn
#
# Copyright (C) Capital K Partners
# Author: Alex Rubinsteyn
# Contact: alex [at] capitalkpartners [dot] com
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
from sklearn.base import BaseEstimator
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
import numpy as np
class ViterbiTreeNode(BaseEstimator):
def __init__(self, depth, max_depth, num_retries, leaf_model):
self.depth = depth
self.max_depth = max_depth
self.is_leaf = (depth == max_depth)
self.num_retries = num_retries
        # regularization strength C is chosen per fit from gen_random_cs(), so no fixed C is stored
if depth == max_depth:
self.leaf_model = leaf_model
else:
self.left = ViterbiTreeNode(depth+1, max_depth, num_retries, leaf_model)
self.right = ViterbiTreeNode(depth+1, max_depth, num_retries, leaf_model)
def gen_random_cs(self):
return 10 ** (np.random.randn(self.num_retries) - 1)
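    # Added illustration (values below are typical draws, not from the source):
    # np.random.randn is standard normal, so these C values are log10-normally
    # distributed around 10**-1 = 0.1, e.g.
    #     >>> 10 ** (np.random.randn(3) - 1)
    #     array([ 0.03,  0.24,  0.09])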
def init_fit(self, X,Y):
"""Initialize partitions and leaf models to minimize training error"""
best_model = None
best_error = np.inf
for c in self.gen_random_cs():
if self.is_leaf:
model = self.leaf_model(C=c)
else:
model = LinearSVC(C=c)
model.fit(X,Y)
            error = 1.0 - model.score(X, Y)  # score() returns accuracy; convert to error rate
            if error < best_error:
best_model = model
best_error = error
self.model = best_model
if not self.is_leaf:
            pred = self.model.predict(X)
mask = (pred != 1)
self.left.init_fit(X[mask, :], Y[mask])
self.right.init_fit(X[~mask, :], Y[~mask])
    def refit_partition(self, X, partition, Y):
"""Assumes that 'init_fit' has already been run."""
if self.is_leaf:
self.model.fit(X,Y)
else:
nrows = X.shape[0]
# get probabilities of y=1
left_prob = self.left.predict_proba(X)[:, 1]
right_prob = self.right.predict_proba(X)[:, 1]
assignments = np.zeros(nrows)
            right_mask = (left_prob < right_prob) & (Y == 1)
# TODO
# assignments[]
    def refit_leaves(self, X, Y):
# TODO
pass
    def predict(self, X):
# TODO
pass
class ViterbiTree(BaseEstimator):
def __init__(self, max_depth=3, num_retries = 3, leaf_model=LogisticRegression):
self.root = ViterbiTreeNode(1, max_depth, num_retries, leaf_model)
def fit(self, X, Y):
self.root.init_fit(X,Y)
    def predict(self, X):
return self.root.predict(X)
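# Hedged usage sketch (not in the original file; the data names are placeholders):
#
#     tree = ViterbiTree(max_depth=3, num_retries=3)
#     tree.fit(X_train, Y_train)      # greedy top-down initialization via init_fit
#     Y_hat = tree.predict(X_test)    # delegates to the root node
#
# Note that refit_partition/refit_leaves above are still TODO stubs, so only the
# initial fit/predict path is exercised by this sketch.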
| lgpl-3.0 |
abhisg/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 253 | 4158 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
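# Added cost note (assumes GridSearchCV's default 3-fold cross-validation): the
# settings left uncommented above give 3 * 2 * 2 * 2 = 24 parameter
# combinations, i.e. 24 * 3 = 72 pipeline fits per grid-search run.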
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
clemkoa/scikit-learn | sklearn/datasets/tests/test_mldata.py | 22 | 5181 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
setup_tmpdata()
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
teardown_tmpdata()
def test_fetch_one_column():
setup_tmpdata()
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
teardown_tmpdata()
def test_fetch_multiple_column():
setup_tmpdata()
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
teardown_tmpdata()
| bsd-3-clause |
toastedcornflakes/scikit-learn | examples/classification/plot_digits_classification.py | 34 | 2409 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# matplotlib.pyplot.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
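# Added note: with the digits dataset bundled with scikit-learn this yields a
# (1797, 64) matrix -- 1797 images, each flattened from 8x8 pixels into a
# 64-dimensional feature vector.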
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/io/common.py | 9 | 14607 | """Common IO api utilities"""
import sys
import os
import csv
import codecs
import zipfile
from contextlib import contextmanager, closing
from pandas.compat import StringIO, BytesIO, string_types, text_type
from pandas import compat
from pandas.core.common import pprint_thing, is_number
try:
import pathlib
_PATHLIB_INSTALLED = True
except ImportError:
_PATHLIB_INSTALLED = False
try:
from py.path import local as LocalPath
_PY_PATH_INSTALLED = True
except:
_PY_PATH_INSTALLED = False
if compat.PY3:
from urllib.request import urlopen, pathname2url
_urlopen = urlopen
from urllib.parse import urlparse as parse_url
import urllib.parse as compat_parse
from urllib.parse import (uses_relative, uses_netloc, uses_params,
urlencode, urljoin)
from urllib.error import URLError
from http.client import HTTPException
else:
from urllib2 import urlopen as _urlopen
from urllib import urlencode, pathname2url
from urlparse import urlparse as parse_url
from urlparse import uses_relative, uses_netloc, uses_params, urljoin
from urllib2 import URLError
from httplib import HTTPException
from contextlib import contextmanager, closing
from functools import wraps
# @wraps(_urlopen)
@contextmanager
def urlopen(*args, **kwargs):
with closing(_urlopen(*args, **kwargs)) as f:
yield f
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
class PerformanceWarning(Warning):
pass
class DtypeWarning(Warning):
pass
try:
from boto.s3 import key
class BotoFileLikeReader(key.Key):
"""boto Key modified to be more file-like
This modification of the boto Key will read through a supplied
S3 key once, then stop. The unmodified boto Key object will repeatedly
cycle through a file in S3: after reaching the end of the file,
boto will close the file. Then the next call to `read` or `next` will
re-open the file and start reading from the beginning.
Also adds a `readline` function which will split the returned
values by the `\n` character.
"""
def __init__(self, *args, **kwargs):
encoding = kwargs.pop("encoding", None) # Python 2 compat
super(BotoFileLikeReader, self).__init__(*args, **kwargs)
self.finished_read = False # Add a flag to mark the end of the read.
self.buffer = ""
self.lines = []
if encoding is None and compat.PY3:
encoding = "utf-8"
self.encoding = encoding
self.lines = []
def next(self):
return self.readline()
__next__ = next
def read(self, *args, **kwargs):
if self.finished_read:
return b'' if compat.PY3 else ''
return super(BotoFileLikeReader, self).read(*args, **kwargs)
def close(self, *args, **kwargs):
self.finished_read = True
return super(BotoFileLikeReader, self).close(*args, **kwargs)
def seekable(self):
"""Needed for reading by bz2"""
return False
def readline(self):
"""Split the contents of the Key by '\n' characters."""
if self.lines:
retval = self.lines[0]
self.lines = self.lines[1:]
return retval
if self.finished_read:
if self.buffer:
retval, self.buffer = self.buffer, ""
return retval
else:
raise StopIteration
if self.encoding:
self.buffer = "{}{}".format(self.buffer, self.read(8192).decode(self.encoding))
else:
self.buffer = "{}{}".format(self.buffer, self.read(8192))
split_buffer = self.buffer.split("\n")
self.lines.extend(split_buffer[:-1])
self.buffer = split_buffer[-1]
return self.readline()
except ImportError:
# boto is only needed for reading from S3.
pass
def _is_url(url):
"""Check to see if a URL has a valid protocol.
Parameters
----------
url : str or unicode
Returns
-------
isurl : bool
If `url` has a valid protocol return True otherwise False.
"""
try:
return parse_url(url).scheme in _VALID_URLS
except:
return False
def _is_s3_url(url):
"""Check for an s3, s3n, or s3a url"""
try:
return parse_url(url).scheme in ['s3', 's3n', 's3a']
except:
return False
def maybe_read_encoded_stream(reader, encoding=None, compression=None):
"""read an encoded stream from the reader and transform the bytes to
unicode if required based on the encoding
Parameters
----------
reader : a streamable file-like object
encoding : optional, the encoding to attempt to read
Returns
-------
a tuple of (a stream of decoded bytes, the encoding which was used)
"""
if compat.PY3 or encoding is not None: # pragma: no cover
if encoding:
errors = 'strict'
else:
errors = 'replace'
encoding = 'utf-8'
if compression == 'gzip':
reader = BytesIO(reader.read())
else:
reader = StringIO(reader.read().decode(encoding, errors))
else:
if compression == 'gzip':
reader = BytesIO(reader.read())
encoding = None
return reader, encoding
def _expand_user(filepath_or_buffer):
"""Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Parameters
----------
filepath_or_buffer : object to be converted if possible
Returns
-------
expanded_filepath_or_buffer : an expanded filepath or the
input if not expandable
"""
if isinstance(filepath_or_buffer, string_types):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer
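# Added illustration (the home directory shown is hypothetical):
#     _expand_user("~/data.csv") -> "/home/alice/data.csv" on a POSIX system
#     _expand_user(buf) -> buf unchanged for any non-string input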
def _validate_header_arg(header):
if isinstance(header, bool):
raise TypeError("Passing a bool to header is invalid. "
"Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names")
def _stringify_path(filepath_or_buffer):
"""Return the argument coerced to a string if it was a pathlib.Path
or a py.path.local
Parameters
----------
filepath_or_buffer : object to be converted
Returns
-------
    str_filepath_or_buffer : the string version of the input path
"""
if _PATHLIB_INSTALLED and isinstance(filepath_or_buffer, pathlib.Path):
return text_type(filepath_or_buffer)
if _PY_PATH_INSTALLED and isinstance(filepath_or_buffer, LocalPath):
return filepath_or_buffer.strpath
return filepath_or_buffer
def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
compression=None):
"""
If the filepath_or_buffer is a url, translate and return the buffer
passthru otherwise.
Parameters
----------
filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
or buffer
encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
Returns
-------
a filepath_or_buffer, the encoding, the compression
"""
if _is_url(filepath_or_buffer):
req = _urlopen(str(filepath_or_buffer))
if compression == 'infer':
content_encoding = req.headers.get('Content-Encoding', None)
if content_encoding == 'gzip':
compression = 'gzip'
else:
compression = None
        # append the compression to the tuple returned by the function
to_return = list(maybe_read_encoded_stream(req, encoding, compression)) + \
[compression]
return tuple(to_return)
if _is_s3_url(filepath_or_buffer):
try:
import boto
except:
raise ImportError("boto is required to handle s3 files")
# Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
# are environment variables
parsed_url = parse_url(filepath_or_buffer)
try:
conn = boto.connect_s3()
except boto.exception.NoAuthHandlerFound:
conn = boto.connect_s3(anon=True)
b = conn.get_bucket(parsed_url.netloc, validate=False)
if compat.PY2 and (compression == 'gzip' or
(compression == 'infer' and
filepath_or_buffer.endswith(".gz"))):
k = boto.s3.key.Key(b, parsed_url.path)
filepath_or_buffer = BytesIO(k.get_contents_as_string(
encoding=encoding))
else:
k = BotoFileLikeReader(b, parsed_url.path, encoding=encoding)
k.open('r') # Expose read errors immediately
filepath_or_buffer = k
return filepath_or_buffer, None, compression
# It is a pathlib.Path/py.path.local or string
filepath_or_buffer = _stringify_path(filepath_or_buffer)
return _expand_user(filepath_or_buffer), None, compression
def file_path_to_url(path):
"""
converts an absolute native path to a FILE URL.
Parameters
----------
path : a path in native format
Returns
-------
a valid FILE URL
"""
return urljoin('file:', pathname2url(path))
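# Added illustration (POSIX path below is hypothetical):
#     file_path_to_url("/tmp/data.csv") -> "file:///tmp/data.csv"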
# ZipFile is not a context manager for <= 2.6
# must be tuple index here since 2.6 doesn't use namedtuple for version_info
if sys.version_info[1] <= 6:
@contextmanager
def ZipFile(*args, **kwargs):
with closing(zipfile.ZipFile(*args, **kwargs)) as zf:
yield zf
else:
ZipFile = zipfile.ZipFile
def _get_handle(path, mode, encoding=None, compression=None):
"""Gets file handle for given path and mode.
"""
if compression is not None:
if encoding is not None and not compat.PY3:
msg = 'encoding + compression not yet supported in Python 2'
raise ValueError(msg)
if compression == 'gzip':
import gzip
f = gzip.GzipFile(path, mode)
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, mode)
else:
raise ValueError('Unrecognized compression type: %s' %
compression)
if compat.PY3:
from io import TextIOWrapper
f = TextIOWrapper(f, encoding=encoding)
return f
else:
if compat.PY3:
if encoding:
f = open(path, mode, encoding=encoding)
else:
f = open(path, mode, errors='replace')
else:
f = open(path, mode)
return f
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def read(self, bytes=-1):
return self.reader.read(bytes).encode("utf-8")
def readline(self):
return self.reader.readline().encode("utf-8")
def next(self):
return next(self.reader).encode("utf-8")
# Python 3 iterator
__next__ = next
if compat.PY3: # pragma: no cover
def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
# ignore encoding
return csv.reader(f, dialect=dialect, **kwds)
def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
return csv.writer(f, dialect=dialect, **kwds)
else:
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
On Python 3, this is replaced (below) by csv.reader, which handles
unicode.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = next(self.reader)
return [compat.text_type(s, "utf-8") for s in row]
# python 3 iterator
__next__ = next
def __iter__(self): # pragma: no cover
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
self.quoting = kwds.get("quoting", None)
def writerow(self, row):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
row = [x if _check_as_is(x)
else pprint_thing(x).encode("utf-8") for x in row]
self.writer.writerow([s for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
for i, row in enumerate(rows):
rows[i] = [x if _check_as_is(x)
else pprint_thing(x).encode("utf-8") for x in row]
self.writer.writerows([[s for s in row] for row in rows])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0) | mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/matplotlib/backends/backend_gtk3agg.py | 6 | 3144 | import cairo
import numpy as np
import sys
import warnings
import backend_agg
import backend_gtk3
from matplotlib.figure import Figure
from matplotlib import transforms
if sys.version_info[0] >= 3:
warnings.warn("The Gtk3Agg backend is not known to work on Python 3.x.")
class FigureCanvasGTK3Agg(backend_gtk3.FigureCanvasGTK3,
backend_agg.FigureCanvasAgg):
def __init__(self, figure):
backend_gtk3.FigureCanvasGTK3.__init__(self, figure)
self._bbox_queue = []
def _renderer_init(self):
pass
def _render_figure(self, width, height):
backend_agg.FigureCanvasAgg.draw(self)
def on_draw_event(self, widget, ctx):
""" GtkDrawable draw event, like expose_event in GTK 2.X
"""
allocation = self.get_allocation()
w, h = allocation.width, allocation.height
if not len(self._bbox_queue):
if self._need_redraw:
self._render_figure(w, h)
bbox_queue = [transforms.Bbox([[0, 0], [w, h]])]
else:
return
else:
bbox_queue = self._bbox_queue
for bbox in bbox_queue:
area = self.copy_from_bbox(bbox)
buf = np.fromstring(area.to_string_argb(), dtype='uint8')
x = int(bbox.x0)
y = h - int(bbox.y1)
width = int(bbox.x1) - int(bbox.x0)
height = int(bbox.y1) - int(bbox.y0)
image = cairo.ImageSurface.create_for_data(
buf, cairo.FORMAT_ARGB32, width, height)
ctx.set_source_surface(image, x, y)
ctx.paint()
if len(self._bbox_queue):
self._bbox_queue = []
return False
def blit(self, bbox=None):
# If bbox is None, blit the entire canvas to gtk. Otherwise
# blit only the area defined by the bbox.
if bbox is None:
bbox = self.figure.bbox
allocation = self.get_allocation()
w, h = allocation.width, allocation.height
x = int(bbox.x0)
y = h - int(bbox.y1)
width = int(bbox.x1) - int(bbox.x0)
height = int(bbox.y1) - int(bbox.y0)
self._bbox_queue.append(bbox)
self.queue_draw_area(x, y, width, height)
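    # Typical use (added sketch; `canvas` and `ax` are placeholder names): in
    # animation-style code, canvas.blit(ax.bbox) redraws only that axes region;
    # the bbox is converted to device pixels above and queued via
    # queue_draw_area instead of invalidating the whole window.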
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(backend_agg.FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
class FigureManagerGTK3Agg(backend_gtk3.FigureManagerGTK3):
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK3Agg(figure)
manager = FigureManagerGTK3Agg(canvas, num)
return manager
FigureManager = FigureManagerGTK3Agg
show = backend_gtk3.show
| mit |
slinderman/pyhsmm_spiketrains | experiments/make_figure10.py | 1 | 6661 | """
Make figure 6, which includes
1. A plot of the centerpoints of all states
2. A plot of the top three latent state maps
3. A plot of the true and reconstructed locations
"""
import os
import cPickle
import gzip
from collections import namedtuple
import numpy as np
from scipy.io import loadmat
import matplotlib
import matplotlib.patches
matplotlib.rcParams.update({'font.sans-serif' : 'Helvetica',
'axes.labelsize': 9,
'xtick.labelsize' : 9,
'ytick.labelsize' : 9,
'axes.titlesize' : 11})
import brewer2mpl
allcolors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
from pyhsmm_spiketrains.internals.utils import split_train_test, convert_polar_to_xy
Results = namedtuple(
"Results", ["name", "loglikes", "predictive_lls",
"N_used", "alphas", "gammas",
"rates", "obs_hypers",
"samples", "timestamps"])
from hips.plotting.layout import *
from hips.plotting.colormaps import white_to_color_cmap
from hips.distributions.circular_distribution import CircularDistribution
def make_figure(results, S_train, pos_train, S_test, pos_test, center, radius, figdir="."):
model = results.samples
model.relabel_by_usage()
N_used = results.N_used[-1]
stateseq = model.stateseqs[0]
occupancy = model.state_usages
T_test = S_test.shape[0]
t_test = np.arange(T_test) * 0.25
fig = create_figure(figsize=(5,3))
# Plot the centers of the latent states
ax = create_axis_at_location(fig, .1, 1.7, 1., 1., transparent=True)
# plt.figtext(0.05/5, 2.8/3, "A")
remove_plot_labels(ax)
circle = matplotlib.patches.Circle(xy=[0,0],
radius= radius,
linewidth=1,
edgecolor="k",
facecolor="white")
ax.add_patch(circle)
to_plot = np.array([0, 1, 2, 3, 25, 29, 31])
# plt.figtext(1.2/5, 2.8/3, "B")
# for k in xrange(N_used-1,-1,-1):
for k in xrange(N_used):
relocc = occupancy[k] / np.float(np.amax(occupancy))
cd = CircularDistribution(center, radius)
cd.fit_xy(pos_train[stateseq==k,0], pos_train[stateseq==k,1])
# import pdb; pdb.set_trace()
rm, thm = cd.mean
xm,ym = convert_polar_to_xy(np.array([[rm, thm]]), [0,0])
# Figure out color
if k in to_plot:
k_ind = np.where(to_plot==k)[0][0]
color = allcolors[k_ind]
else:
color = 'k'
ax.plot(xm,ym,'o',
markersize=3+relocc*4,
markerfacecolor=color,
# markeredgecolor=color,
markeredgecolor='k',
markeredgewidth=1)
ax.set_xlim(-radius, radius)
ax.set_ylim(-radius, radius)
ax.set_title('All states', fontdict={'fontsize' : 9})
# Plot a figure for each latent state
print np.row_stack((np.arange(N_used),
np.array([dd.r for dd in model.dur_distns[:N_used]])))
dur = np.arange(1,16)
yticks = [0, 0.2, 0.4]
for k in xrange(3):
left = 1.45 + 1.1*k + 0.1
color = allcolors[k]
# Plot the locations of this state
ax = create_axis_at_location(fig, left, 1.8, 1., .9, transparent=True)
# remove_plot_labels(ax)
# # Plot the empirical location distribution
# cd = CircularDistribution(center, radius)
# cd.fit_xy(pos_train[stateseq==k,0], pos_train[stateseq==k,1])
# cd.plot(ax=ax, cmap=cmap, plot_data=True, plot_colorbar=False)
dur_distn = model.dur_distns[to_plot[k]]
ax.bar(dur, np.exp(dur_distn.log_pmf(dur)), width=1, color=color)
ax.set_xticks([1, 5, 10, 15])
ax.set_yticks(yticks)
if k > 0:
ax.set_yticklabels([])
else:
ax.set_ylabel("Duration Prob.", labelpad=0)
ax.set_xlim(1,16)
ax.set_xlabel("Duration", labelpad=0)
ax.set_title('State %d (%.1f%%)' % (to_plot[k]+1, 100.*occupancy[to_plot[k]]),
fontdict={'fontsize' : 9})
# Bottom row
for k in xrange(3,7):
left = .35 + 1.1*(k-3) + 0.1
color = allcolors[k]
# Plot the locations of this state
ax = create_axis_at_location(fig, left, .4, 1., .9, transparent=True)
# remove_plot_labels(ax)
# # Plot the empirical location distribution
# cd = CircularDistribution(center, radius)
# cd.fit_xy(pos_train[stateseq==k,0], pos_train[stateseq==k,1])
# cd.plot(ax=ax, cmap=cmap, plot_data=True, plot_colorbar=False)
dur_distn = model.dur_distns[to_plot[k]]
ax.bar(dur, np.exp(dur_distn.log_pmf(dur)), width=1, color=color)
ax.set_xticks([1, 5, 10, 15])
ax.set_yticks(yticks)
if k > 3:
ax.set_yticklabels([])
else:
ax.set_ylabel("Duration Prob.", labelpad=0)
ax.set_xlim(1,16)
ax.set_xlabel("Duration", labelpad=0)
ax.set_title('State %d (%.1f%%)' % (to_plot[k]+1, 100.*occupancy[to_plot[k]]),
fontdict={'fontsize' : 9})
fig.savefig(os.path.join(figdir, 'figure10.pdf'))
fig.savefig(os.path.join(figdir, 'figure10.png'), dpi=300)
plt.show()
def load_hipp_data(dataname="hipp_2dtrack_a", trainfrac=0.8):
raw_data = loadmat("data/%s.mat" % dataname)
S = raw_data['S'].astype(np.int).copy("C")
# Get the time stamps
T,N = S.shape
dt = 0.25
ts = np.arange(T) * dt
# Get the corresponding position
pos = raw_data['pos']
S_train, pos_train, S_test, pos_test = split_train_test(S, pos, trainfrac=trainfrac)
if "cp" in raw_data and "r" in raw_data:
center = raw_data['cp'].ravel()
radius = np.float(raw_data['r'])
else:
center = radius = None
return N, S_train, pos_train, S_test, pos_test, center, radius
if __name__ == "__main__":
# Load the data
dataset = "hipp_2dtrack_a"
N, S_train, pos_train, S_test, pos_test, center, radius = \
load_hipp_data(dataname=dataset)
# Load results
runnum = 1
results_dir = os.path.join("results", dataset, "run%03d" % runnum)
results_type = "hdphsmm_scale"
results_file = os.path.join(results_dir, results_type + ".pkl.gz")
with gzip.open(results_file, "r") as f:
results = cPickle.load(f)
make_figure(results,
S_train, pos_train,
S_test, pos_test,
center, radius,
figdir=results_dir)
| mit |
lakshayg/tensorflow | tensorflow/contrib/learn/python/learn/estimators/debug_test.py | 46 | 32817 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Debug estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import operator
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import debug
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
NUM_EXAMPLES = 100
N_CLASSES = 5 # Cardinality of multiclass labels.
LABEL_DIMENSION = 3 # Dimensionality of regression labels.
def _train_test_split(features_and_labels):
features, labels = features_and_labels
train_set = (features[:int(len(features) / 2)], labels[:int(len(features) / 2)])
test_set = (features[int(len(features) / 2):], labels[int(len(features) / 2):])
return train_set, test_set
def _input_fn_builder(features, labels):
def input_fn():
feature_dict = {'features': constant_op.constant(features)}
my_labels = labels
if my_labels is not None:
my_labels = constant_op.constant(my_labels)
return feature_dict, my_labels
return input_fn
class DebugClassifierTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.labels = np.random.choice(
range(N_CLASSES), p=[0.1, 0.3, 0.4, 0.1, 0.1], size=NUM_EXAMPLES)
self.binary_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
self.binary_float_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
def testPredict(self):
"""Tests that DebugClassifier outputs the majority class."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
majority_class, _ = max(collections.Counter(train_labels).items(),
key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_classes(input_fn=_input_fn_builder(test_features,
None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictBinary(self):
"""Same as above for binary predictions."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
majority_class, _ = max(collections.Counter(train_labels).items(),
key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_classes(input_fn=_input_fn_builder(test_features,
None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
(train_features, train_labels), (
test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
majority_class, _ = max(collections.Counter(train_labels).items(),
key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_classes(input_fn=_input_fn_builder(test_features,
None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictProba(self):
"""Tests that DebugClassifier outputs observed class distribution."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
class_distribution = np.zeros((1, N_CLASSES))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testPredictProbaBinary(self):
"""Same as above but for binary classification."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
(train_features, train_labels), (
test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, int(label)] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugClassifier(n_classes=3),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugClassifier)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
classifier = debug.DebugClassifier(config=run_config.RunConfig(
tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
classifier = debug.DebugClassifier(n_classes=3)
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_StringLabel(self):
"""Tests multi-class classification with string labels."""
def _input_fn_train():
labels = constant_op.constant([['foo'], ['bar'], ['baz'], ['bar']])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
classifier = debug.DebugClassifier(
n_classes=3, label_keys=['foo', 'bar', 'baz'])
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(
weight_column_name='w',
n_classes=2,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(weight_column_name='w')
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(classifier.predict_classes(input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
model_dir = tempfile.mkdtemp()
classifier = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = debug.DebugClassifier(config=run_config.RunConfig(
tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=5)
def default_input_fn(unused_estimator, examples):
return feature_column_ops.parse_feature_columns_from_examples(
examples, feature_columns)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir, input_fn=default_input_fn)
class DebugRegressorTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.targets = np.random.rand(NUM_EXAMPLES, LABEL_DIMENSION)
def testPredictScores(self):
"""Tests that DebugRegressor outputs the mean target."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.targets])
mean_target = np.mean(train_labels, 0)
expected_prediction = np.vstack(
[mean_target for _ in range(test_labels.shape[0])])
classifier = debug.DebugRegressor(label_dimension=LABEL_DIMENSION)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_scores(input_fn=_input_fn_builder(test_features,
None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugRegressor(),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
model_dir = tempfile.mkdtemp()
regressor = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
if __name__ == '__main__':
test.main()
| apache-2.0 |
zrhans/python | topicos/py/ATMOS-anuais.py | 1 | 4432 | #!/usr/bin/python
# coding: utf-8
import sys, getopt
# # Basic data analysis and triage (screening)
# Analyses by Hans, 2015
#
# * 2012 (10sec)
# * 2013 (10sec)
# * 2014 (10sec ate 1Min seguinte)
# * 2015 (1Min)
#
# ----
# In[1]:
import numpy as np
import pandas as pd
print(sys.version) # Python version - optional
print(np.__version__) # numpy module version - optional
import matplotlib
import matplotlib.pyplot as plt
#get_ipython().magic('matplotlib inline')
import datetime
import time
# Local data folder
caminho = './dados/'
def app(ano):
print("Analisando ano %s" % ano)
df_dados = pd.read_csv(caminho+'sr311-'+ano+'.csv', index_col=None, parse_dates=['Timestamp'])
print("Dados "+ano+" Importados OK")
	# remove the first column and make the Timestamp column the index
del(df_dados['Unnamed: 0'])
df_dados.set_index('Timestamp', inplace=True)
	# Keep only a few columns of interest
df_dados = df_dados[['AirTC', 'RH', 'Rain_mm']]
print('Gerando Graficos Brutos - '+ano)
plt.figure(figsize=(16,8))
plt.subplot(2, 1, 1)
plt.title('Dados Brutos %s' % ano)
	plt.xlabel('')
df_dados.AirTC.plot()
df_dados.RH.plot()
plt.subplot(2, 1, 2)
plt.ylabel('Chuva - mm')
	plt.xlabel('Data')
df_dados.Rain_mm.plot()
#plt.savefig('figs/raw-'+ano+'-rain.png')
plt.savefig('figs/raw-'+ano+'.png')
sys.exit()
	# Create a new continuous time index covering the start and end of the original data series
d = pd.DataFrame(index=pd.date_range(pd.datetime(int(ano),1,1), pd.datetime(int(ano),12,31), freq='10s'))
print("Indice criado OK")
	# Left-join the two DataFrames (timestamps with no match are filled with NaN)
ndf_dados = d.join(df_dados)
	#ndf_dados.fillna(0) # replace NaN values with 0
print("Junção OK")
print('Gerando Graficos Brutos Reindexados - %s' % ano)
plt.figure(figsize=(16,8))
plt.title('Dados Brutos %s' % ano)
plt.ylabel('Temperatura - Umidade')
ndf_dados.AirTC.plot()
ndf_dados.RH.plot()
plt.savefig('figs/brutos-%s.png' % ano)
plt.figure(figsize=(16,8))
plt.ylabel('Chuva')
ndf_dados.Rain_mm.plot()
plt.savefig('figs/brutos-chuva-%s.png' % ano)
	# Export to a new file
df_dados.to_csv('sao_roque_%s-AirTC-RH-Rain.csv' % ano)
	# ### Daily analyses
df_dados_diarios = df_dados[['AirTC','RH']].resample('D', how='mean')
chuva = df_dados.Rain_mm.resample('D', how='sum')
df_dados_diarios['Acum_Chuva'] = chuva
	# Show daily means
print('Gerando Graficos Media Diaria - %s' % ano)
plt.figure(figsize=(16,8))
plt.subplot(2,1,1)
plt.title('Media Diaria %s' % ano)
	df_dados_diarios.AirTC.plot(label="Temp")
	df_dados_diarios.RH.plot(label="RH")
	df_dados_diarios.Acum_Chuva.plot(label="ChuvaAcum")
	plt.legend()
print('Gerando Graficos Acumulado de Chuva - %s' % ano)
plt.subplot(2,1,2)
somado = df_dados_diarios.Acum_Chuva.cumsum()
somado.plot(label='Acum %s' % ano)
plt.savefig('figs/AcumChuva- %s.png' % ano)
	# Histogram of the daily accumulated rainfall
print('Gerando Graficos Histograma Acumulado diario de Chuva - %s' % ano)
plt.figure(figsize=(16,8))
df_dados_diarios.Acum_Chuva.plot(kind='hist', orientation='horizontal', alpha=.8)
plt.savefig('figs/AcumChuvaHist-%s.png' % ano)
	# ### On which days did the accumulated rainfall exceed 20 mm in this year?
	# Which days had accumulated rainfall above 20 mm in this year?
print(df_dados_diarios.Acum_Chuva[df_dados_diarios.Acum_Chuva > 20.])
	# How many days had accumulated rainfall above 20 mm in 2015?
print(df_dados_diarios.Acum_Chuva[df_dados_diarios.Acum_Chuva > 20.].count())
return
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print('test.py -i <arq_entrada> -o <arq_saida>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('test.py -i <arq_entrada> -o <arq_saida>')
sys.exit();
elif opt in ("-i", "--iano"):
#inputfile = arg
app(arg)
elif opt in ("-o", "--ofile"):
outputfile = arg
print('Arquivo de Entrada: "', inputfile)
print('Arquivo de Saida : "', outputfile)
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-2.0 |
nhejazi/scikit-learn | examples/neighbors/plot_classification.py | 55 | 1805 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
# we only take the first two features. We could avoid this ugly
# slicing by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold,
edgecolor='k', s=20)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
ducandu/aiopening | aiopening/examples/test_image_classification/main.py | 1 | 16880 |
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('cifar-10-python.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
'cifar-10-python.tar.gz',
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open('cifar-10-python.tar.gz') as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
# ## Explore the Data
# The dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named `data_batch_1`, `data_batch_2`, etc.. Each batch contains the labels and images that are one of the following:
# * airplane
# * automobile
# * bird
# * cat
# * deer
# * dog
# * frog
# * horse
# * ship
# * truck
#
# Understanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the `batch_id` and `sample_id`. The `batch_id` is the id for a batch (1-5). The `sample_id` is the id for a image and label pair in the batch.
#
# Ask yourself "What are all possible labels?", "What is the range of values for the image data?", "Are the labels in order or random?". Answers to questions like these will help you preprocess the data and end up with better predictions.
# In[2]:
#get_ipython().magic('matplotlib inline')
#get_ipython().magic("config InlineBackend.figure_format = 'retina'")
import helper
import numpy as np
# Explore the dataset
batch_id = 4
sample_id = 222
#helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
# In[3]:
import pickle
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
# In[4]:
# check value range of data
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
print("min value: {:d} max value: {:d}".format(features.min(), features.max()))
# ## Implement Preprocess Functions
# ### Normalize
# In the cell below, implement the `normalize` function to take in image data, `x`, and return it as a normalized Numpy array. The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as `x`.
# In[5]:
#def normalize(x):
# """
# Normalize a list of sample image data in the range of 0 to 1
# : x: List of image data. The image shape is (32, 32, 3)
# : return: Numpy array of normalize data
# """
# # TODO: Implement Function
# normalizer = lambda n: n/255
# return np.vectorize normalizer
# do this in one line instead with a lambda and numpy's great vectorize function :)
normalize = np.vectorize(lambda i: i/255)
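# Added note: np.vectorize calls the lambda once per element in pure Python, which is slow
# for batches of 32x32x3 images. Below is a fully vectorized sketch of the same scaling
# (assuming the input is a numpy array of 8-bit pixel values in 0..255); it is optional and
# the rest of the script keeps using `normalize` above.
def normalize_fast(x):
    # scale pixel values into [0, 1] as float32 with a single array operation
    return np.asarray(x, dtype=np.float32) / 255.0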
# ### One-hot encode
# Just like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the `one_hot_encode` function. The input, `x`, are a list of labels. Implement the function to return the list of labels as One-Hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to `one_hot_encode`. Make sure to save the map of encodings outside the function.
#
# Hint: Don't reinvent the wheel.
# In[6]:
def one_hot_encode(x):
"""
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
"""
# the return matrix (shape: [num samples x 10 categories])
ret = np.zeros([len(x), 10], dtype=np.float32)
for i, label in enumerate(x):
ret[i][label] = 1 # only set the label/category-slot to 1, leave all others at 0
return ret
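# Added sketch (following the "don't reinvent the wheel" hint above): scikit-learn's
# LabelBinarizer yields the same 10-wide 0/1 encoding. This assumes scikit-learn is
# available; it is optional and the rest of the script keeps using one_hot_encode.
try:
    from sklearn.preprocessing import LabelBinarizer
    _label_binarizer = LabelBinarizer().fit(range(10))
    def one_hot_encode_sklearn(x):
        # same shape and dtype as one_hot_encode above
        return _label_binarizer.transform(x).astype(np.float32)
except ImportError:
    pass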
import pickle
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import importlib
import aiopening as ai
import tensorflow as tf
n_classes = 10
class MyNet(ai.Model):
def __init__(self, name, num_colors=3, **kwargs):
super().__init__(name, **kwargs)
# input depth (the 4th slot of the shape of the input volume (#samples x w x h x in-depth))
self.num_colors = num_colors
# have to implement the build method
# build the entire graph from scratch into the default graph (the call to reset will handle this for us)
def construct(self):
# define our three inputs to this graph
x, y, keep_prob = self.add_feeds((tf.float32, [None, 32, 32, 3], "x"), (tf.float32, [None, n_classes], "y"),
(tf.float32, None, "keep_prob"))
# create the convolutional part
conv = ai.modules.Convolutional2DNN("convolutional_module",
output_channels=16,
kernel_shapes=(8, 8),
strides=(1, 1),
max_pooling=True,
pool_k_sizes=(2, 2),
pool_strides=(2, 2)
)
# now conv is an snt.AbstractModule
conv_out = conv(x)
# add dropout to conv layer
conv_out = tf.nn.dropout(conv_out, keep_prob)
# conv_out is the output of the convolutional part AND input the the next module (flatten)
flatten = ai.modules.FlattenLayer("flatten_module")
flatten_out = flatten(conv_out)
# flatten_out is the output of the flattening operation AND the input to the next module (fully connected)
fc = ai.modules.FullyConnectedNN("fc_module", [160], activations=[tf.nn.relu])
fc_out = fc(flatten_out)
fc_end = ai.modules.FullyConnectedNN("logits", [10], activations=None)
logits = fc_end(fc_out)
# out are now the logits coming from the last layer
self._outputs["logits"] = logits
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y), name="cost")
train_op = tf.train.AdamOptimizer().minimize(cost, name="train_op") # set this model's train operation
# predictions
predictions = tf.argmax(logits, 1, name="predictions") # 1=axis 1 (0 is the batch)
# Accuracy
correct_predictions = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1)) # this will be true or false values
# casting true/false will result in 0.0/1.0, respectively
# the mean of these 0.0 or 1.0 over all samples will give the accuracy over all samples
accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name="accuracy")
self.add_outputs(logits, cost, train_op, predictions, accuracy)
# question: where is the print-out for the validation accuracy. This value is missing in the entire project.
# how do we know when to stop training?
# Also, this function is only called on the last piece of the batch that's being worked on (1 to 5)
# so since there are 9000 training samples in a batch and my batch size if 512, this function is only run on the
# last 296 samples, which doesn't really make sense. It should be run on the entire training set PLUS on
# the validation set separately
def print_stats(self, session, feature_batch, label_batch):
"""
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
"""
cost_out, acc_out = session.run([self.get_output("cost"), self.get_output("accuracy")],
feed_dict={self.get_feed("x"): feature_batch, self.get_feed("y"): label_batch, self.get_feed("keep_prob"): 1.0})
###DEBUG: cost_out, acc_out, logits_out, labels_out = session.run([cost, accuracy, logits, y], feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.0})
print("Loss: {:.2f} Accuracy: {:.2f}%".format(cost_out, acc_out*100))
#### DEBUG: (had to find out why cost would go down to ~2.3 and accuracy would stay at ~10%, it's because all 10 categories would get 0.1 in the softmax, which is useless)
## list of 10-logits (predicted) for each input sample
#logits_list = tf.unpack(logits_out)
## list of 10-logits (actual label) for each input sample
#labels_list = tf.unpack(labels_out)
## loop over all input samples
#for i, l in enumerate(logits_list):
# print("Sample: "+str(i))
# predicted_cats = tf.unpack(l)
# actual_cats = tf.unpack(labels_list[i])
# print("output-predictions (softmaxed): "+str(session.run(tf.nn.softmax(predicted_cats))))
# print("output-labels: "+str(session.run(actual_cats)))
epochs = 1
batch_size = 2048
keep_probability = 1.0 # making this smaller than 1.0 usually gives me worse results (I'm guessing my model is too simple to be needing dropout at all)
myModel = MyNet("test_conv_nn", 3)
"""
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
## Initializing the variables
#sess.run(tf.global_variables_initializer())
# Training cycle -> this will be the algorithm
for epoch in range(epochs):
batch_i = 1
i = 0
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
#print("training mini-batch {:d} Len-features={:d} Len-labels={:d}".format(i, len(batch_features), len(batch_labels)))
myModel.train(sess, {"keep_prob": keep_probability, "x": batch_features, "y": batch_labels}, init=True if epoch == 0 and i == 0 else False)
i += 1
print('Epoch {:>2}/{:d}, CIFAR-10 Batch {:d}:'.format(epoch + 1, epochs, batch_i))
print('Training Set: ', end='')
myModel.print_stats(sess, batch_features, batch_labels)
"""
# ### Fully Train the Model
# Now that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches.
# In[ ]:
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all CIFAR-10 batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
sess.run(myModel.get_output("train_op"), feed_dict={
myModel.get_feed("keep_prob"): keep_probability,
myModel.get_feed("x"): batch_features,
myModel.get_feed("y"): batch_labels})
myModel.print_stats(sess, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}:'.format(epoch + 1, batch_i),)
print('Training Set: ', end='')
# CORRECTION: print out validation loss and accuracy
# also, using the print_stats function now instead of 'custom' code
#print('Validation Set: ', end='')
#print_stats(sess, valid_feature_batch, valid_label_batch, cost, accuracy)
#print("")
# Save Model's variables
myModel.save_state(sess)
"""
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
#Test the saved model against the test dataset
test_features, test_labels = pickle.load(open('preprocess_training.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
"""
# ## Why 50-70% Accuracy?
# You might be wondering why you can't get an accuracy any higher. First things first, 50% isn't bad for a simple CNN. Pure guessing would get you 10% accuracy. However, you might notice people are getting scores [well above 70%](http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130). That's because we haven't taught you all there is to know about neural networks. We still need to cover a few more techniques.
# ## Submitting This Project
# When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_image_classification.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
# ## Trying my daughter's (7) frog painting :)
#
# In[ ]:
"""import matplotlib.pyplot as plt
from matplotlib.image import imread
import helper
image1 = imread("/home/ubuntu/deep-learning/image-classification/katis_frog.png")
image2 = imread("/home/ubuntu/deep-learning/image-classification/henrik_auto.png")
image3 = imread("/home/ubuntu/deep-learning/image-classification/katis_dog.png")
f, ax = plt.subplots(3, sharey=True)
ax[0].set_title('picture 0')
ax[0].imshow(image1)
ax[1].set_title('picture 1')
ax[1].imshow(image2)
ax[2].set_title('picture 2')
ax[2].imshow(image3)
#plt.xlim(0, 32)
#plt.ylim(32, 0)
# slice away alpha channel
def slice_alpha(image):
return image[:,:,:-1]
image1 = slice_alpha(image1)
image2 = slice_alpha(image2)
image3 = slice_alpha(image3)
# fill up image array (1st dim of input tensor)
images = [image1, image2, image3]
loaded_graph = tf.Graph()
label_names = helper._load_label_names()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
predictions = sess.run(tf.argmax(loaded_logits, 1), feed_dict={loaded_x: images, loaded_keep_prob: 1.0})
#print(predictions)
for i,pred in enumerate(predictions):
print("Picture {:d} is showing a {:s}".format(i,label_names[pred]))
"""
def predict(self, session, inputs):
predictions = session.run(self._predictions, feed_dict=self.get_feed_dict())
| mit |
RobertABT/heightmap | build/matplotlib/examples/axes_grid/demo_axes_hbox_divider.py | 7 | 1547 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import HBoxDivider
import mpl_toolkits.axes_grid1.axes_size as Size
def make_heights_equal(fig, rect, ax1, ax2, pad):
# pad in inches
h1, v1 = Size.AxesX(ax1), Size.AxesY(ax1)
h2, v2 = Size.AxesX(ax2), Size.AxesY(ax2)
pad_v = Size.Scaled(1)
pad_h = Size.Fixed(pad)
my_divider = HBoxDivider(fig, rect,
horizontal=[h1, pad_h, h2],
vertical=[v1, pad_v, v2])
ax1.set_axes_locator(my_divider.new_locator(0))
ax2.set_axes_locator(my_divider.new_locator(2))
if __name__ == "__main__":
arr1 = np.arange(20).reshape((4,5))
arr2 = np.arange(20).reshape((5,4))
fig, (ax1, ax2) = plt.subplots(1,2)
ax1.imshow(arr1, interpolation="nearest")
ax2.imshow(arr2, interpolation="nearest")
rect = 111 # subplot param for combined axes
make_heights_equal(fig, rect, ax1, ax2, pad=0.5) # pad in inches
for ax in [ax1, ax2]:
ax.locator_params(nbins=4)
# annotate
ax3 = plt.axes([0.5, 0.5, 0.001, 0.001], frameon=False)
ax3.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
ax3.annotate("Location of two axes are adjusted\n"
"so that they have equal heights\n"
"while maintaining their aspect ratios", (0.5, 0.5),
xycoords="axes fraction", va="center", ha="center",
bbox=dict(boxstyle="round, pad=1", fc="w"))
plt.show()
| mit |
fubar2/vapetemp | devhist/stripchartTemp.py | 1 | 8544 | #!/usr/bin/python
# mods ross lazarus jan 2015
# jan 8 no graphics from matplotlib?
# update to jessie and beware that if you use apt-get install python-matplotlib,
# mathplotlib may (still) be defaulting to the wrong
# device - if matplotlib.get_backend() doesn't return u'TkAgg' then you
# might need to update to the git matplotlib repo - worked for me
#
# using wheezy, access to gpio was via /dev/mem so required root
# I noted:
# jan 6 might need to use pigpio for non sudo running
# see http://abyz.co.uk/rpi/pigpio/download.html
# MUST start with
# sudo pigpiod
#
# jan 5 developing a vaporizer temperature probe system
# started out with the adafruit sample for my max31855
# added a stripchart and changed to spi
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Can enable debug output by uncommenting:
#import logging
#logging.basicConfig(level=logging.DEBUG)
from __future__ import division, print_function
import sys
import time
import Adafruit_GPIO.SPI as SPI
import Adafruit_MAX31855.MAX31855 as MAX31855
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
import RPi.GPIO as GPIO
PDio = 4 ## adjust to suit wherever you've set up your
## photodetector. Using a photoresistor on gpio 4
## with a capacitor
def photoDet(PDio=4):
# make a generator for photocell status
while True:
GPIO.setmode(GPIO.BCM) # broadcom
reading = 0
GPIO.setup(PDio, GPIO.OUT) # d/c capacitor
GPIO.output(PDio, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(PDio, GPIO.IN)
# This takes about 1 millisecond per loop cycle
while (GPIO.input(PDio) == GPIO.LOW):
reading += 1
yield reading
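# Added usage note: photoDet() yields one charge-time count per iteration; the count is the
# number of polling loops it took the capacitor to charge through the photoresistor, so
# brighter light (lower resistance) gives a smaller count and darkness a larger one.
# Minimal sketch (the threshold of 500 is only an illustrative guess and must be tuned
# for the actual resistor/capacitor pair):
#
#   detector = photoDet(PDio=4)
#   if next(detector) > 500:
#       print("dark")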
class stripTemp():
# class to encapsulate the strip plotter matlab code
def __init__(self,PLOTSEC=600,outfname='stripchartTemp'):
        self.PLOTSEC = PLOTSEC # e.g. 720 for a 12 minute session on the x axis
self.QUITS = ("q","Q")
self.STARTIN = ('I','i')
self.ENDIN = ('E','e')
# Uncomment one of the blocks of code below to configure your Pi or BBB to use
# software or hardware SPI.
# Raspberry Pi software SPI configuration.
#CLK = 25
#CS = 24
#DO = 18
#sensor = MAX31855.MAX31855(CLK, CS, DO)
# Raspberry Pi hardware SPI configuration.
self.SPI_PORT = 0
self.SPI_DEVICE = 0
self.sensor = MAX31855.MAX31855(spi=SPI.SpiDev(self.SPI_PORT, self.SPI_DEVICE))
# BeagleBone Black software SPI configuration.
#CLK = 'P9_12'
#CS = 'P9_15'
#DO = 'P9_23'
#sensor = MAX31855.MAX31855(CLK, CS, DO)
# BeagleBone Black hardware SPI configuration.
#SPI_PORT = 1
#SPI_DEVICE = 0
#sensor = MAX31855.MAX31855(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
self.ts = self.getTempStream()
self.outf = open('%s.xls' % outfname,'w')
self.savepng = '%s.png' % outfname
self.outf.write('Second\tTemperature\n')
# set up a generator for PLOTSEC seconds of sampling
style.use('ggplot')
# prepopulate arrays to make updates quicker
self.t = range(self.PLOTSEC+2)
self.y = [None for i in self.t]
self.fig, self.ax = plt.subplots()
#self.ax.set_xlim(auto=True)
#self.ax.set_ylim(auto=True)
# fig.canvas.mpl_connect('key_press_event', press)
self.line, = self.ax.plot(0,0,lw=1)
self.text_template = 'Last temperature = %3.2f'
self.title_template = 'Temperature strip chart at %d seconds'
self.t_temp = self.ax.text(0.15,0.15,self.text_template%(0),
transform=self.ax.transAxes, family='monospace',fontsize=10)
#self.t_title = self.ax.text(0.5,1.0,self.title_template%(0),
# transform=self.ax.transAxes, family='monospace',fontsize=20)
self.started = time.time()
# Define a function to convert celsius to fahrenheit.
def c_to_f(self,c):
return c * 9.0 / 5.0 + 32.0
def press(self,event):
print('press', event.key)
sys.stdout.flush()
if event.key in self.QUITS:
go = False
elif event.key in self.STARTIN:
inhale = True
elif event.key in self.ENDIN:
inhale = False
def getTempStream(self,maxx = 10000):
# generator for a stream of mean temps averaged every second or so
# for (eg) 720 maxsec seconds
# samples per sec will vary, so allow a lot
# in practice, taking about 4.5k samples/sec with hardware SPI a/d conversion
# on a pi 2 b january 2015
while True:
x = [0 for n in range(maxx)]
loop = 0
started = time.time()
while ((time.time() - started) < 1.0):
temp = self.sensor.readTempC()
internal = self.sensor.readInternalC()
diff = temp - internal
x[loop] = temp
loop += 1
lasti = loop-1
# print('# took',lasti+1,'samples')
xmean = sum(x[0:(lasti)])/float(lasti)
yield(xmean)
def animate(self,i):
# add new point to y-value arrays
try:
mT = self.ts.next()
except StopIteration:
print('# stop!')
self.outf.close()
            return self.line, self.t_temp, self.fig.suptitle
self.y[i+1] = mT
self.outf.write('%d\t%g\n' % (i,mT))
self.outf.flush()
print(mT,i+1)
if (i > 0):
useme = self.y[0:(i+1)]
avg = sum(useme)/float(i)
self.line.set_xdata( self.t[0:(i+1)] )
self.line.set_ydata( useme )
plt.axis([0,i+1,min(useme)-1,max(useme)+1])
else:
avg = mT
variation = 1
#self.ax.set_xlim(0,1)
self.line.set_xdata([0,] )
self.line.set_ydata( [mT,] )
plt.axis([0,1,mT-1,mT+1])
#self.ax.figure.canvas.draw() # this is necessary, to redraw the axes
self.t_temp.set_text( self.text_template % (mT) )
self.fig.suptitle(self.title_template % (i) )
#plt.draw()
return self.line, self.t_temp, self.fig.suptitle
def initFunc(self): #Init only required for blitting to give a clean slate.
y1 = self.ts.next()
print(y1,'0')
self.y[0] = y1
self.line.set_ydata([y1,])
self.t_temp.set_text("Starting up")
self.fig.suptitle(self.title_template % (0) )
self.ax.set_ylim(y1-1,y1+1)
return self.t_temp,self.line,self.fig.suptitle,self.ax
def startup(self):
self.ani = animation.FuncAnimation(self.fig, self.animate,
init_func=self.initFunc, frames=self.PLOTSEC+1,
repeat=False, blit=False) # blit has to be False, to avoid ugly artefacts on the text
blink = photoDet()
for b in blink:
print(b)
##nowSec = 0
##strip = stripTemp()
##strip.startup()
##plt.show()
##while go:
## mT = ts.next()
## y[nowSec] = mT
## print(nowSec,mT)
## nowSec += 1
## plotlab = 'Temperature strip chart at %d seconds' % nowSec
## ax.plot(y[0:nowSec],range(0,nowSec))
## plt.show(block=False)
## time.sleep(0.01)
| gpl-3.0 |
abimannans/scikit-learn | sklearn/tests/test_grid_search.py | 83 | 28713 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
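# Illustrative summary of the sampler behaviour exercised above: for the 2x3
# grid, n_iter=6 yields every combination exactly once (sampling without
# replacement), n_iter=7 raises ValueError because only 6 combinations exist,
# and once a parameter is given as a distribution (e.g. bernoulli) the sampler
# may draw more iterations than the discrete part of the grid contains.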
| bsd-3-clause |
sanjanalab/GUIDES | static/data/gtex/generate_exons.py | 2 | 5703 | ### Generate exon start and stop sites
import pandas as pd
import pickle
import time
t0 = time.time()
# Create ENSG -> cdsStart, cdsStop mapping
# this is then used inside exon_info generator.
def union_intervals(intervals):
sorted_by_lower_bound = sorted(intervals, key=lambda tup: tup[0])
merged = []
for higher in sorted_by_lower_bound:
if not merged:
merged.append(higher)
else:
lower = merged[-1]
# test for intersection between lower and higher:
# we know via sorting that lower[0] <= higher[0]
if higher[0] <= lower[1]:
upper_bound = max(lower[1], higher[1])
merged[-1] = (lower[0], upper_bound) # replace by merged interval
else:
merged.append(higher)
return merged
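# Illustrative example (hypothetical coordinates): union_intervals merges
# overlapping CCDS exon intervals after sorting by start, e.g.
#   union_intervals([(100, 200), (150, 300), (400, 500)])
#   -> [(100, 300), (400, 500)]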
def restrict_to_intervals(start, stop, intervals):
for interval in intervals:
if interval[0] >= start and interval[1] <= stop: # refGene should contain the CCDS. CCDS inside RefGene
return interval
return (start, start + 1)
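# Illustrative example (hypothetical coordinates): restrict_to_intervals picks
# the first CCDS interval fully contained in the refGene exon, falling back to
# a 1-bp sentinel when none is contained, e.g.
#   restrict_to_intervals(100, 300, [(120, 250), (400, 500)]) -> (120, 250)
#   restrict_to_intervals(100, 110, [(400, 500)])             -> (100, 101)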
print 'opening CCDS_coords.csv', time.time() - t0
# CCDS -> range mapping (str |-> list)
with open('../pre_processed/CDS/CCDS_coords.csv', 'r') as ccds_coords_file:
ccds_df = pd.read_csv(ccds_coords_file, sep="\t", header=0)
# ENSG -> CCDS mapping --> list of intervals [(a,b), (c,d), ...]
print 'opening ENSG-CCDS_hum.txt', time.time() - t0
ensg_ccds_map = {}
with open('../pre_processed/CDS/ENSG-CCDS_hum.txt', 'r') as ensg_ccds:
for line in ensg_ccds:
comps = line.strip('\n').split('\t')
ensg = comps[0] + '.' + comps[1]
ccds = comps[2] + '.'
if ensg == "ENSG00000137185.7":
print ensg, ccds
if len(ccds) > 0:
starts = ccds_df[ccds_df['name'].str.startswith(ccds)]['exonStarts']
stops = ccds_df[ccds_df['name'].str.startswith(ccds)]['exonEnds']
if len(starts) > 0 and len(stops) > 0:
starts = starts.tolist()[0]
stops = stops.tolist()[0]
starts_nums = [int(num) for num in starts.split(',')[:-1]]
stops_nums = [int(num) for num in stops.split(',')[:-1]]
if ensg not in ensg_ccds_map:
ensg_ccds_map[ensg] = union_intervals([(int(a),int(b)) for a, b in zip(starts_nums, stops_nums)])
else:
new_intervals = [(int(a),int(b)) for a, b in zip(starts_nums, stops_nums)]
all_intervals = ensg_ccds_map[ensg] + new_intervals
ensg_ccds_map[ensg] = union_intervals(all_intervals)
print 'ensg_ccds_map["ENSG00000137185.7"]'
print ensg_ccds_map["ENSG00000137185.7"]
print 'starting main script', time.time() - t0
if __name__ == "__main__":
filename = "refGene_base.txt"
df = pd.read_csv(filename, sep="\t", header=None)
df.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
# process the dataframe
print len(df)
results_df = pd.DataFrame(columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames'])
res_idx = 0
for i, row in df.iterrows():
    if i % max(1, len(df) // 1000) == 0: print i  # sparse progress indicator
starts_list = [int(num) for num in row['exonStarts'].split(',')[:-1]]
ends_list = [int(num) for num in row['exonEnds'].split(',')[:-1]]
df.set_value(i, 'exonStarts', [0,0,0] + starts_list) # hacky - will force an error if 'exonStarts' never changed below.
# if we have ccds info...
if row['name'] in ensg_ccds_map:
starts_list_processed = []
ends_list_processed = []
cds_intervals = ensg_ccds_map[row['name']]
assert(len(starts_list) == len(ends_list))
for j in range(len(starts_list)):
start, end = starts_list[j], ends_list[j]
new_interval = restrict_to_intervals(start, end, cds_intervals)
if new_interval[1] == new_interval[0] + 1:
print "removed exon " + str(j) + " of " + str(len(starts_list))
starts_list_processed.append(new_interval[0])
ends_list_processed.append(new_interval[1])
# Expand to include intronic sequences (5 each side)
for k in range(len(starts_list_processed)):
starts_list_processed[k] -= 5
for k in range(len(ends_list_processed)):
ends_list_processed[k] += 5
df.set_value(i, 'exonStarts', list(starts_list_processed))
df.set_value(i, 'exonEnds', list(ends_list_processed))
df.set_value(i, 'cdsStart', cds_intervals[0][0])
df.set_value(i, 'cdsEnd', cds_intervals[-1][1])
new_row = df.iloc[i]
results_df.loc[res_idx] = new_row
res_idx += 1
    #else: # we don't have ccds... keep default
# Expand to include intronic sequences (5 each side)
#for k in range(len(starts_list)):
# starts_list[k] -= 5
#for k in range(len(ends_list)):
# ends_list[k] += 5
#df.set_value(i, 'exonStarts', starts_list)
#df.set_value(i, 'exonEnds', ends_list)
# write exon_info
exon_info = results_df[["name", "chrom", "strand", "exonCount", "exonStarts", "exonEnds"]]
with open("../pre_processed/exon_info.p", "wb") as f:
pickle.dump(exon_info, f)
# write new refGene
results_df['exonStarts'] = results_df.apply(lambda x: (','.join([str(n) for n in x['exonStarts']]) + ','), axis=1)
results_df['exonEnds'] = results_df.apply(lambda x: (','.join([str(n) for n in x['exonEnds']]) + ','), axis=1)
results_df.to_csv('refGene.txt', sep="\t", index=False, header=False)
end_time = time.time()
hours, rem = divmod(end_time-t0, 3600)
minutes, seconds = divmod(rem, 60)
print "time elapsed"
print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
| bsd-3-clause |
joequant/zipline | zipline/history/history_container.py | 18 | 33931 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bisect import insort_left
from collections import namedtuple
from itertools import groupby, product
import logbook
import numpy as np
import pandas as pd
from six import itervalues, iteritems, iterkeys
from . history import HistorySpec
from zipline.finance.trading import with_environment
from zipline.utils.data import RollingPanel, _ensure_index
from zipline.utils.munge import ffill, bfill
logger = logbook.Logger('History Container')
# The closing price is referred to by multiple names,
# allow both for price rollover logic etc.
CLOSING_PRICE_FIELDS = frozenset({'price', 'close_price'})
def ffill_buffer_from_prior_values(freq,
field,
buffer_frame,
digest_frame,
pv_frame,
raw=False):
"""
Forward-fill a buffer frame, falling back to the end-of-period values of a
digest frame if the buffer frame has leading NaNs.
"""
# convert to ndarray if necessary
digest_values = digest_frame
if raw and isinstance(digest_frame, pd.DataFrame):
digest_values = digest_frame.values
buffer_values = buffer_frame
if raw and isinstance(buffer_frame, pd.DataFrame):
buffer_values = buffer_frame.values
nan_sids = pd.isnull(buffer_values[0])
if np.any(nan_sids) and len(digest_values):
# If we have any leading nans in the buffer and we have a non-empty
# digest frame, use the oldest digest values as the initial buffer
# values.
buffer_values[0, nan_sids] = digest_values[-1, nan_sids]
nan_sids = pd.isnull(buffer_values[0])
if np.any(nan_sids):
# If we still have leading nans, fall back to the last known values
# from before the digest.
key_loc = pv_frame.index.get_loc((freq.freq_str, field))
filler = pv_frame.values[key_loc, nan_sids]
buffer_values[0, nan_sids] = filler
if raw:
filled = ffill(buffer_values)
return filled
return buffer_frame.ffill()
def ffill_digest_frame_from_prior_values(freq,
field,
digest_frame,
pv_frame,
raw=False):
"""
Forward-fill a digest frame, falling back to the last known prior values if
necessary.
"""
# convert to ndarray if necessary
values = digest_frame
if raw and isinstance(digest_frame, pd.DataFrame):
values = digest_frame.values
nan_sids = pd.isnull(values[0])
if np.any(nan_sids):
# If we have any leading nans in the frame, use values from pv_frame to
# seed values for those sids.
key_loc = pv_frame.index.get_loc((freq.freq_str, field))
filler = pv_frame.values[key_loc, nan_sids]
values[0, nan_sids] = filler
if raw:
filled = ffill(values)
return filled
return digest_frame.ffill()
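# Illustrative example (hypothetical values) of the fallback order used by the
# two helpers above: for a digest column [NaN, 10.5, NaN] (one sid) with a
# last-known prior value of 9.0 stored under (freq, field), the leading NaN is
# seeded with 9.0 and the forward fill then yields [9.0, 10.5, 10.5].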
def freq_str_and_bar_count(history_spec):
"""
Helper for getting the frequency string and bar count from a history spec.
"""
return (history_spec.frequency.freq_str, history_spec.bar_count)
@with_environment()
def next_bar(spec, env):
"""
Returns a function that will return the next bar for a given datetime.
"""
if spec.frequency.unit_str == 'd':
if spec.frequency.data_frequency == 'minute':
return lambda dt: env.get_open_and_close(
env.next_trading_day(dt),
)[1]
else:
return env.next_trading_day
else:
return env.next_market_minute
def compute_largest_specs(history_specs):
"""
Maps a Frequency to the largest HistorySpec at that frequency from an
iterable of HistorySpecs.
"""
return {key: max(group, key=lambda f: f.bar_count)
for key, group in groupby(
sorted(history_specs, key=freq_str_and_bar_count),
key=lambda spec: spec.frequency)}
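# Illustrative example (hypothetical specs): given daily HistorySpecs with
# bar_counts 20 and 50 plus a minute HistorySpec with bar_count 30,
# compute_largest_specs maps the daily Frequency to the 50-bar spec and the
# minute Frequency to the 30-bar spec.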
# tuples to store a change to the shape of a HistoryContainer
FrequencyDelta = namedtuple(
'FrequencyDelta',
['freq', 'buffer_delta'],
)
LengthDelta = namedtuple(
'LengthDelta',
['freq', 'delta'],
)
HistoryContainerDeltaSuper = namedtuple(
'HistoryContainerDelta',
['field', 'frequency_delta', 'length_delta'],
)
class HistoryContainerDelta(HistoryContainerDeltaSuper):
"""
A class representing a resize of the history container.
"""
def __new__(cls, field=None, frequency_delta=None, length_delta=None):
"""
field is a new field that was added.
        frequency_delta is a FrequencyDelta representing a newly added frequency.
        length_delta is a LengthDelta, which pairs a frequency with a bar_count.
        If any argument is None, then no change of that type occurred.
"""
return super(HistoryContainerDelta, cls).__new__(
cls, field, frequency_delta, length_delta,
)
@property
def empty(self):
"""
Checks if the delta is empty.
"""
return (self.field is None and
self.frequency_delta is None and
self.length_delta is None)
def normalize_to_data_freq(data_frequency, dt):
if data_frequency == 'minute':
return dt
return pd.tslib.normalize_date(dt)
class HistoryContainer(object):
"""
Container for all history panels and frames used by an algoscript.
To be used internally by TradingAlgorithm, but *not* passed directly to the
algorithm.
Entry point for the algoscript is the result of `get_history`.
"""
VALID_FIELDS = {
'price', 'open_price', 'volume', 'high', 'low', 'close_price',
}
def __init__(self,
history_specs,
initial_sids,
initial_dt,
data_frequency,
bar_data=None):
"""
A container to hold a rolling window of historical data within a user's
algorithm.
Args:
history_specs (dict[Frequency:HistorySpec]): The starting history
specs that this container should be able to service.
initial_sids (set[Asset or Int]): The starting sids to watch.
initial_dt (datetime): The datetime to start collecting history from.
bar_data (BarData): If this container is being constructed during
handle_data, this is the BarData for the current bar to fill the
buffer with. If this is constructed elsewhere, it is None.
Returns:
An instance of a new HistoryContainer
"""
# History specs to be served by this container.
self.history_specs = history_specs
self.largest_specs = compute_largest_specs(
itervalues(self.history_specs)
)
# The set of fields specified by all history specs
self.fields = pd.Index(
sorted(set(spec.field for spec in itervalues(history_specs)))
)
self.sids = pd.Index(
sorted(set(initial_sids or []))
)
self.data_frequency = data_frequency
initial_dt = normalize_to_data_freq(self.data_frequency, initial_dt)
# This panel contains raw minutes for periods that haven't been fully
# completed. When a frequency period rolls over, these minutes are
# digested using some sort of aggregation call on the panel (e.g. `sum`
# for volume, `max` for high, `min` for low, etc.).
self.buffer_panel = self.create_buffer_panel(initial_dt, bar_data)
# Dictionaries with Frequency objects as keys.
self.digest_panels, self.cur_window_starts, self.cur_window_closes = \
self.create_digest_panels(initial_sids, initial_dt)
        # Helps prop up the prior-day panel against holding a NaN when the
        # data has already been seen.
self.last_known_prior_values = pd.DataFrame(
data=None,
index=self.prior_values_index,
columns=self.prior_values_columns,
# Note: For bizarre "intricacies of the spaghetti that is pandas
# indexing logic" reasons, setting this dtype prevents indexing
# errors in update_last_known_values. This is safe for the time
# being because our only forward-fillable fields are floats. If we
# need to add a non-float-typed forward-fillable field, then we may
# find ourselves having to track down and fix a pandas bug.
dtype=np.float64,
)
_ffillable_fields = None
@property
def ffillable_fields(self):
if self._ffillable_fields is None:
fillables = self.fields.intersection(HistorySpec.FORWARD_FILLABLE)
self._ffillable_fields = fillables
return self._ffillable_fields
@property
def prior_values_index(self):
index_values = list(
product(
(freq.freq_str for freq in self.unique_frequencies),
# Only store prior values for forward-fillable fields.
self.ffillable_fields,
)
)
if index_values:
return pd.MultiIndex.from_tuples(index_values)
else:
# MultiIndex doesn't gracefully support empty input, so we return
            # an empty regular Index if we have no values.
return pd.Index(index_values)
@property
def prior_values_columns(self):
return self.sids
@property
def all_panels(self):
yield self.buffer_panel
for panel in self.digest_panels.values():
yield panel
@property
def unique_frequencies(self):
"""
Return an iterator over all the unique frequencies serviced by this
container.
"""
return iterkeys(self.largest_specs)
@with_environment()
def _add_frequency(self, spec, dt, data, env=None):
"""
Adds a new frequency to the container. This reshapes the buffer_panel
if needed.
"""
freq = spec.frequency
self.largest_specs[freq] = spec
new_buffer_len = 0
if freq.max_bars > self.buffer_panel.window_length:
# More bars need to be held in the buffer_panel to support this
# freq
if freq.data_frequency \
!= self.buffer_spec.frequency.data_frequency:
# If the data_frequencies are not the same, then we need to
# create a fresh buffer.
self.buffer_panel = self.create_buffer_panel(
dt, bar_data=data,
)
new_buffer_len = None
else:
# The frequencies are the same, we just need to add more bars.
self._resize_panel(
self.buffer_panel,
freq.max_bars,
dt,
self.buffer_spec.frequency,
)
new_buffer_len = freq.max_minutes
        # update the current buffer_spec to reflect the new length.
self.buffer_spec.bar_count = new_buffer_len + 1
if spec.bar_count > 1:
# This spec has more than one bar, construct a digest panel for it.
self.digest_panels[freq] = self._create_digest_panel(
dt, spec=spec, env=env,
)
else:
self.cur_window_starts[freq] = dt
self.cur_window_closes[freq] = freq.window_close(
self.cur_window_starts[freq]
)
self.last_known_prior_values = self.last_known_prior_values.reindex(
index=self.prior_values_index,
)
return FrequencyDelta(freq, new_buffer_len)
def _add_field(self, field):
"""
Adds a new field to the container.
"""
# self.fields is already sorted, so we just need to insert the new
# field in the correct index.
ls = list(self.fields)
insort_left(ls, field)
self.fields = pd.Index(ls)
# unset fillable fields cache
self._ffillable_fields = None
self._realign_fields()
self.last_known_prior_values = self.last_known_prior_values.reindex(
index=self.prior_values_index,
)
return field
@with_environment()
def _add_length(self, spec, dt, env=None):
"""
        Increases the length of the digest panel for spec.frequency. If this
        frequency does not yet have a panel and one is needed, a digest panel
        will be constructed.
"""
old_count = self.largest_specs[spec.frequency].bar_count
self.largest_specs[spec.frequency] = spec
delta = spec.bar_count - old_count
panel = self.digest_panels.get(spec.frequency)
if panel is None:
# The old length for this frequency was 1 bar, meaning no digest
# panel was held. We must construct a new one here.
panel = self._create_digest_panel(
dt, spec=spec, env=env,
)
else:
self._resize_panel(
panel, spec.bar_count - 1, dt, freq=spec.frequency, env=env,
)
self.digest_panels[spec.frequency] = panel
return LengthDelta(spec.frequency, delta)
@with_environment()
def _resize_panel(self, panel, size, dt, freq, env=None):
"""
Resizes a panel, fills the date_buf with the correct values.
"""
# This is the oldest datetime that will be shown in the current window
# of the panel.
oldest_dt = pd.Timestamp(panel.start_date, tz='utc',)
delta = size - panel.window_length
# Construct the missing dates.
missing_dts = self._create_window_date_buf(
delta, freq.unit_str, freq.data_frequency, oldest_dt,
)
panel.extend_back(missing_dts)
@with_environment()
def _create_window_date_buf(self,
window,
unit_str,
data_frequency,
dt,
env=None):
"""
Creates a window length date_buf looking backwards from dt.
"""
if unit_str == 'd':
# Get the properly key'd datetime64 out of the pandas Timestamp
if data_frequency != 'daily':
arr = env.open_close_window(
dt,
window,
offset=-window,
).market_close.astype('datetime64[ns]').values
else:
arr = env.open_close_window(
dt,
window,
offset=-window,
).index.values
return arr
else:
return env.market_minute_window(
env.previous_market_minute(dt),
window,
step=-1,
)[::-1].values
@with_environment()
def _create_panel(self, dt, spec, env=None):
"""
Constructs a rolling panel with a properly aligned date_buf.
"""
dt = normalize_to_data_freq(spec.frequency.data_frequency, dt)
window = spec.bar_count - 1
date_buf = self._create_window_date_buf(
window,
spec.frequency.unit_str,
spec.frequency.data_frequency,
dt,
env=env,
)
panel = RollingPanel(
window=window,
items=self.fields,
sids=self.sids,
initial_dates=date_buf,
)
return panel
@with_environment()
def _create_digest_panel(self,
dt,
spec,
window_starts=None,
window_closes=None,
env=None):
"""
Creates a digest panel, setting the window_starts and window_closes.
If window_starts or window_closes are None, then self.cur_window_starts
or self.cur_window_closes will be used.
"""
freq = spec.frequency
window_starts = window_starts if window_starts is not None \
else self.cur_window_starts
window_closes = window_closes if window_closes is not None \
else self.cur_window_closes
window_starts[freq] = freq.normalize(dt)
window_closes[freq] = freq.window_close(window_starts[freq])
return self._create_panel(dt, spec, env=env)
def ensure_spec(self, spec, dt, bar_data):
"""
Ensure that this container has enough space to hold the data for the
given spec. This returns a HistoryContainerDelta to represent the
changes in shape that the container made to support the new
HistorySpec.
"""
updated = {}
if spec.field not in self.fields:
updated['field'] = self._add_field(spec.field)
if spec.frequency not in self.largest_specs:
updated['frequency_delta'] = self._add_frequency(
spec, dt, bar_data,
)
if spec.bar_count > self.largest_specs[spec.frequency].bar_count:
updated['length_delta'] = self._add_length(spec, dt)
return HistoryContainerDelta(**updated)
def add_sids(self, to_add):
"""
Add new sids to the container.
"""
self.sids = pd.Index(
sorted(self.sids.union(_ensure_index(to_add))),
)
self._realign_sids()
def drop_sids(self, to_drop):
"""
Remove sids from the container.
"""
self.sids = pd.Index(
sorted(self.sids.difference(_ensure_index(to_drop))),
)
self._realign_sids()
def _realign_sids(self):
"""
Realign our constituent panels after adding or removing sids.
"""
self.last_known_prior_values = self.last_known_prior_values.reindex(
columns=self.sids,
)
for panel in self.all_panels:
panel.set_minor_axis(self.sids)
def _realign_fields(self):
self.last_known_prior_values = self.last_known_prior_values.reindex(
index=self.prior_values_index,
)
for panel in self.all_panels:
panel.set_items(self.fields)
@with_environment()
def create_digest_panels(self,
initial_sids,
initial_dt,
env=None):
"""
Initialize a RollingPanel for each unique panel frequency being stored
by this container. Each RollingPanel pre-allocates enough storage
space to service the highest bar-count of any history call that it
serves.
"""
# Map from frequency -> first/last minute of the next digest to be
# rolled for that frequency.
first_window_starts = {}
first_window_closes = {}
# Map from frequency -> digest_panels.
panels = {}
for freq, largest_spec in iteritems(self.largest_specs):
if largest_spec.bar_count == 1:
# No need to allocate a digest panel; this frequency will only
# ever use data drawn from self.buffer_panel.
first_window_starts[freq] = freq.normalize(initial_dt)
first_window_closes[freq] = freq.window_close(
first_window_starts[freq]
)
continue
dt = initial_dt
rp = self._create_digest_panel(
dt,
spec=largest_spec,
window_starts=first_window_starts,
window_closes=first_window_closes,
env=env,
)
panels[freq] = rp
return panels, first_window_starts, first_window_closes
def create_buffer_panel(self, initial_dt, bar_data):
"""
Initialize a RollingPanel containing enough minutes to service all our
frequencies.
"""
max_bars_needed = max(
freq.max_bars for freq in self.unique_frequencies
)
freq = '1m' if self.data_frequency == 'minute' else '1d'
spec = HistorySpec(
max_bars_needed + 1, freq, None, None, self.data_frequency,
)
rp = self._create_panel(
initial_dt, spec,
)
self.buffer_spec = spec
if bar_data is not None:
frame = self.frame_from_bardata(bar_data, initial_dt)
rp.add_frame(initial_dt, frame)
return rp
def convert_columns(self, values):
"""
        If columns have a specific type you want to enforce, override this
method and return the transformed values.
"""
return values
def digest_bars(self, history_spec, do_ffill):
"""
Get the last (history_spec.bar_count - 1) bars from self.digest_panel
for the requested HistorySpec.
"""
bar_count = history_spec.bar_count
if bar_count == 1:
# slicing with [1 - bar_count:] doesn't work when bar_count == 1,
# so special-casing this.
res = pd.DataFrame(index=[], columns=self.sids, dtype=float)
return res.values, res.index
field = history_spec.field
# Panel axes are (field, dates, sids). We want just the entries for
# the requested field, the last (bar_count - 1) data points, and all
# sids.
digest_panel = self.digest_panels[history_spec.frequency]
frame = digest_panel.get_current(field, raw=True)
if do_ffill:
# Do forward-filling *before* truncating down to the requested
# number of bars. This protects us from losing data if an illiquid
# stock has a gap in its price history.
filled = ffill_digest_frame_from_prior_values(
history_spec.frequency,
history_spec.field,
frame,
self.last_known_prior_values,
                raw=True
            )
            # Truncate only after we've forward-filled
            indexer = slice(1 - bar_count, None)
return filled[indexer], digest_panel.current_dates()[indexer]
else:
indexer = slice(1 - bar_count, None)
return frame[indexer, :], digest_panel.current_dates()[indexer]
def buffer_panel_minutes(self,
buffer_panel,
earliest_minute=None,
latest_minute=None,
raw=False):
"""
Get the minutes in @buffer_panel between @earliest_minute and
@latest_minute, inclusive.
@buffer_panel can be a RollingPanel or a plain Panel. If a
RollingPanel is supplied, we call `get_current` to extract a Panel
object.
If no value is specified for @earliest_minute, use all the minutes we
        have up until @latest_minute.
If no value for @latest_minute is specified, use all values up until
the latest minute.
"""
if isinstance(buffer_panel, RollingPanel):
buffer_panel = buffer_panel.get_current(start=earliest_minute,
end=latest_minute,
raw=raw)
return buffer_panel
# Using .ix here rather than .loc because loc requires that the keys
# are actually in the index, whereas .ix returns all the values between
# earliest_minute and latest_minute, which is what we want.
return buffer_panel.ix[:, earliest_minute:latest_minute, :]
def frame_from_bardata(self, data, algo_dt):
"""
Create a DataFrame from the given BarData and algo dt.
"""
data = data._data
frame_data = np.empty((len(self.fields), len(self.sids))) * np.nan
for j, sid in enumerate(self.sids):
sid_data = data.get(sid)
if not sid_data:
continue
if algo_dt != sid_data['dt']:
continue
for i, field in enumerate(self.fields):
frame_data[i, j] = sid_data.get(field, np.nan)
return pd.DataFrame(
frame_data,
index=self.fields.copy(),
columns=self.sids.copy(),
)
def update(self, data, algo_dt):
"""
        Takes @data, the bar for @algo_dt, checks whether any new digests need
        to be rolled, then adds the new data to the buffer panel.
"""
frame = self.frame_from_bardata(data, algo_dt)
self.update_last_known_values()
self.update_digest_panels(algo_dt, self.buffer_panel)
self.buffer_panel.add_frame(algo_dt, frame)
def update_digest_panels(self, algo_dt, buffer_panel, freq_filter=None):
"""
Check whether @algo_dt is greater than cur_window_close for any of our
frequencies. If so, roll a digest for that frequency using data drawn
from @buffer panel and insert it into the appropriate digest panels.
If @freq_filter is specified, only use the given data to update
frequencies on which the filter returns True.
This takes `buffer_panel` as an argument rather than using
self.buffer_panel so that this method can be used to add supplemental
data from an external source.
"""
for frequency in filter(freq_filter, self.unique_frequencies):
# We don't keep a digest panel if we only have a length-1 history
# spec for a given frequency
digest_panel = self.digest_panels.get(frequency, None)
while algo_dt > self.cur_window_closes[frequency]:
earliest_minute = self.cur_window_starts[frequency]
latest_minute = self.cur_window_closes[frequency]
minutes_to_process = self.buffer_panel_minutes(
buffer_panel,
earliest_minute=earliest_minute,
latest_minute=latest_minute,
raw=True
)
if digest_panel is not None:
# Create a digest from minutes_to_process and add it to
# digest_panel.
digest_frame = self.create_new_digest_frame(
minutes_to_process,
self.fields,
self.sids
)
digest_panel.add_frame(
latest_minute,
digest_frame,
self.fields,
self.sids
)
# Update panel start/close for this frequency.
self.cur_window_starts[frequency] = \
frequency.next_window_start(latest_minute)
self.cur_window_closes[frequency] = \
frequency.window_close(self.cur_window_starts[frequency])
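    # Illustrative walk-through of update_digest_panels (hypothetical setup):
    # if algo_dt has moved past the current window close for a daily frequency,
    # the while loop rolls one digest per elapsed window -- the buffered
    # minutes between the window start and close are aggregated into a single
    # frame, appended to that frequency's digest panel, and the window
    # start/close are advanced before checking again.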
def frame_to_series(self, field, frame, columns=None):
"""
Convert a frame with a DatetimeIndex and sid columns into a series with
a sid index, using the aggregator defined by the given field.
"""
if isinstance(frame, pd.DataFrame):
columns = frame.columns
frame = frame.values
if not len(frame):
return pd.Series(
data=(0 if field == 'volume' else np.nan),
index=columns,
).values
if field in ['price', 'close_price']:
# shortcircuit for full last row
vals = frame[-1]
if np.all(~np.isnan(vals)):
return vals
return ffill(frame)[-1]
elif field == 'open_price':
return bfill(frame)[0]
elif field == 'volume':
return np.nansum(frame, axis=0)
elif field == 'high':
return np.nanmax(frame, axis=0)
elif field == 'low':
return np.nanmin(frame, axis=0)
else:
raise ValueError("Unknown field {}".format(field))
def aggregate_ohlcv_panel(self,
fields,
ohlcv_panel,
items=None,
minor_axis=None):
"""
Convert an OHLCV Panel into a DataFrame by aggregating each field's
frame into a Series.
"""
vals = ohlcv_panel
if isinstance(ohlcv_panel, pd.Panel):
vals = ohlcv_panel.values
items = ohlcv_panel.items
minor_axis = ohlcv_panel.minor_axis
data = [
self.frame_to_series(
field,
vals[items.get_loc(field)],
minor_axis
)
for field in fields
]
return np.array(data)
def create_new_digest_frame(self, buffer_minutes, items=None,
minor_axis=None):
"""
Package up minutes in @buffer_minutes into a single digest frame.
"""
return self.aggregate_ohlcv_panel(
self.fields,
buffer_minutes,
items=items,
minor_axis=minor_axis
)
def update_last_known_values(self):
"""
Store the non-NaN values from our oldest frame in each frequency.
"""
ffillable = self.ffillable_fields
if not len(ffillable):
return
for frequency in self.unique_frequencies:
digest_panel = self.digest_panels.get(frequency, None)
if digest_panel:
oldest_known_values = digest_panel.oldest_frame(raw=True)
else:
oldest_known_values = self.buffer_panel.oldest_frame(raw=True)
oldest_vals = oldest_known_values
oldest_columns = self.fields
for field in ffillable:
f_idx = oldest_columns.get_loc(field)
field_vals = oldest_vals[f_idx]
# isnan would be fast, possible to use?
non_nan_sids = np.where(pd.notnull(field_vals))
key = (frequency.freq_str, field)
key_loc = self.last_known_prior_values.index.get_loc(key)
self.last_known_prior_values.values[
key_loc, non_nan_sids
] = field_vals[non_nan_sids]
def get_history(self, history_spec, algo_dt):
"""
Main API used by the algoscript is mapped to this function.
Selects from the overarching history panel the values for the
@history_spec at the given @algo_dt.
"""
field = history_spec.field
do_ffill = history_spec.ffill
# Get our stored values from periods prior to the current period.
digest_frame, index = self.digest_bars(history_spec, do_ffill)
# Get minutes from our buffer panel to build the last row of the
# returned frame.
buffer_panel = self.buffer_panel_minutes(
self.buffer_panel,
earliest_minute=self.cur_window_starts[history_spec.frequency],
raw=True
)
buffer_frame = buffer_panel[self.fields.get_loc(field)]
if do_ffill:
buffer_frame = ffill_buffer_from_prior_values(
history_spec.frequency,
field,
buffer_frame,
digest_frame,
self.last_known_prior_values,
raw=True
)
last_period = self.frame_to_series(field, buffer_frame, self.sids)
return fast_build_history_output(digest_frame,
last_period,
algo_dt,
index=index,
columns=self.sids)
def fast_build_history_output(buffer_frame,
last_period,
algo_dt,
index=None,
columns=None):
"""
Optimized concatenation of DataFrame and Series for use in
HistoryContainer.get_history.
Relies on the fact that the input arrays have compatible shapes.
"""
buffer_values = buffer_frame
if isinstance(buffer_frame, pd.DataFrame):
buffer_values = buffer_frame.values
index = buffer_frame.index
columns = buffer_frame.columns
return pd.DataFrame(
data=np.vstack(
[
buffer_values,
last_period,
]
),
index=fast_append_date_to_index(
index,
pd.Timestamp(algo_dt)
),
columns=columns,
)
def fast_append_date_to_index(index, timestamp):
"""
Append a timestamp to a DatetimeIndex. DatetimeIndex.append does not
appear to work.
"""
return pd.DatetimeIndex(
np.hstack(
[
index.values,
[timestamp.asm8],
]
),
tz='UTC',
)
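# Illustrative usage (hypothetical dates) for fast_append_date_to_index:
#   idx = pd.DatetimeIndex(['2014-01-02', '2014-01-03'], tz='UTC')
#   fast_append_date_to_index(idx, pd.Timestamp('2014-01-06', tz='UTC'))
#   -> DatetimeIndex(['2014-01-02', '2014-01-03', '2014-01-06'], tz='UTC')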
| apache-2.0 |
rrohan/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score improves (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
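# Illustrative example (hypothetical sizes): with n_max_train_sizes=20 and
# training-set sizes 2, 4, ..., 20, MockImprovingEstimator's training score
# decreases 1.9 -> 1.0 while its test score increases 0.1 -> 1.0, which is the
# np.linspace pattern the learning_curve tests below assert against.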
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
ghchinoy/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 6 | 60842 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column_lib as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
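  # Illustrative summary of the behaviour exercised above: with
  # embedding_lr_multipliers={embedding_language: 0.0} the gradient applied to
  # the language embedding is scaled to zero, so it stays at its 0.1
  # initializer across the training steps, while the wire embedding (no
  # multiplier) drifts away from 0.1.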
class ActivationFunctionTest(test.TestCase):
def _getModelForActivation(self, activation_fn):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
'activation_fn': activation_fn,
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
return dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testValidActivation(self):
_ = self._getModelForActivation('relu')
def testRaisesOnBadActivationName(self):
with self.assertRaisesRegexp(ValueError,
'Activation name should be one of'):
self._getModelForActivation('max_pool')
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to be 10 and the min slice size to be extremely
    # small, so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertClassificationPredictions(
self, expected_len, n_classes, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, range(n_classes))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
fc_core.embedding_column(language_column, dimension=1),
fc_core.numeric_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=300)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, n_classes, predicted_proba)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.cast(labels, dtypes.float32)
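      # strided_slice with begin=[0, 1], end=[-1, 2] and end_mask=1 keeps all
      # rows and selects column 1, i.e. the probability of the positive class.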
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
    # Test the case where the prediction_key is neither "classes" nor
    # "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def _assertRegressionOutputs(
self, predictions, expected_shape):
predictions_nparray = np.array(predictions)
self.assertAllEqual(expected_shape, predictions_nparray.shape)
self.assertTrue(np.issubdtype(predictions_nparray.dtype, np.floating))
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self._assertRegressionOutputs(predicted_scores, [3])
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self._assertRegressionOutputs(predicted_scores, [3])
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
timvandermeij/mobile-radio-tomography | planning/Runner.py | 3 | 12989 | # Core imports
import itertools
import thread
# Library imports
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
# Package imports
from Problem import Reconstruction_Plan_Continuous, Reconstruction_Plan_Discrete
from ..core.Threadable import Threadable
class Planning_Runner(Threadable):
"""
A supervisor class that handles running the evolutionary algorithm on the
reconstruction planning problem and creates results and plots from the
output of the algorithm.
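    A minimal usage sketch (assuming that the `arguments`, `thread_manager`
    and `import_manager` objects from this package have already been
    constructed; `start` runs the algorithm synchronously and returns the
    indices of the feasible individuals):
        runner = Planning_Runner(arguments, thread_manager, import_manager)
        indices = runner.start()
        for i in indices:
            positions, unsnappable = runner.get_positions(i)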
"""
def __init__(self, arguments, thread_manager, import_manager,
iteration_callback=None):
super(Planning_Runner, self).__init__("planning_runner", thread_manager)
self.arguments = arguments
self.settings = self.arguments.get_settings("planning_runner")
self._import_manager = import_manager
self._iteration_callback = iteration_callback
self.reset()
def _get_problem(self):
if self.settings.get("discrete"):
problem_class = Reconstruction_Plan_Discrete
else:
problem_class = Reconstruction_Plan_Continuous
return problem_class(self.arguments, self._import_manager)
def reset(self):
"""
Reset the algorithm and problem state.
"""
self.problem = self._get_problem()
algo = self.settings.get("algorithm_class")
algo_class = self._import_manager.load_class(algo, module="Algorithm",
relative_module="planning")
self.algorithm = algo_class(self.problem, self.arguments)
self.algorithm.set_iteration_callback(self._handle_algorithm_data)
# Whether the algorithm is done running.
self.done = False
# Whether the algorithm should halt immediately once it detects the
# signal to deactivate.
self._halt = False
# Iteration from which we received the data from the iteration callback
self.current_iteration = 0
# Population of individuals
self.P = np.empty(0)
# Feasibility of individuals
self.Feasible = np.empty(0)
# Objective values of individuals
self.Objectives = np.empty(0)
# Nondominated layers of solution objectives
self.R = []
def _save(self, P, Objectives, Feasible):
self.P = np.copy(P)
self.Objectives = np.copy(Objectives)
self.Feasible = np.copy(Feasible)
self.R = self.algorithm.sort_nondominated(self.Objectives)
def _handle_algorithm_data(self, algorithm, data):
# Pass through to the actual algorithm iteration callback, and track
# the current variables in the runner as well.
if self._iteration_callback is not None:
self.current_iteration = data["iteration"]
self._save(data["population"], data["objectives"], data["feasible"])
self._iteration_callback(algorithm, data)
def activate(self):
"""
Run the algorithm in its own thread.
"""
super(Planning_Runner, self).activate()
self.reset()
thread.start_new_thread(self.start, ())
def deactivate(self):
"""
Halt the algorithm if it is currently running.
This throws away the results of the algorithm.
This makes the iteration limit invalid, so any later runs must either
reinstantiate the entire runner or set the iteration limit again.
"""
super(Planning_Runner, self).deactivate()
self.stop()
self._halt = True
def start(self):
"""
Run the algorithm.
Returns a list of feasible indices, sorted on the first objective.
"""
try:
P, Objectives, Feasible = self.algorithm.evolve()
if self._halt:
return []
self.current_iteration = self.get_iteration_current()
self._save(P, Objectives, Feasible)
except:
if self._halt:
return []
super(Planning_Runner, self).interrupt()
return []
self.done = True
return self.get_indices()
def stop(self):
"""
Stop the algorithm if it is currently running.
Compared to `deactivate`, this allows the run to finish at the current
iteration normally, making it possible to use its results.
This makes the iteration limit invalid, so any later runs must either
reinstantiate the entire runner or set the iteration limit again.
"""
self.set_iteration_limit(0)
def finish(self):
"""
Finalize the data from the run and mark it as finished.
"""
def get_indices(self, sort=0):
"""
Get the indices of the population list that are feasible.
The resulting list is sorted according to the objective values with
index given in `sort`. If the sort index is negative, then the indices
are not sorted.
If the algorithm does not yet have (intermediate) results, an empty list
is returned.
"""
if self.Feasible.size == 0:
return []
indices = [i for i in range(self.get_population_size()) if self.Feasible[i]]
if sort >= 0:
indices = sorted(indices, key=lambda i: self.Objectives[i][sort])
return indices
def get_objectives(self, i):
"""
Get the objective values for an individual with index `i` from a run of
the algorithm.
If the algorithm does not yet have (intermediate) results, `None` is
returned.
"""
if self.Objectives.size == 0:
return None
return self.Objectives[i]
def find_objectives(self, objectives):
"""
Get the indices of the individuals that have the given objective values
`objectives`.
If the algorithm does not yet have (intermediate) results, an empty
list is returned.
"""
if self.Objectives.size == 0:
return []
return np.nonzero(np.all(self.Objectives == objectives, axis=1))[0]
def is_feasible(self, i):
"""
Check whether the individual with index `i` is feasible.
If the algorithm does not yet have (intermediate) results, `False` is
returned.
"""
if self.Feasible.size == 0:
return False
return self.Feasible[i]
def get_positions(self, i):
"""
Given an index `i` of an individual from a run of the algorithm, return
the positions and unsnappable count of that solution.
If the algorithm does not yet have (intermediate) results or if the
solution is not feasible, then this method returns an empty numpy array
and zero instead.
"""
if self.Feasible.size == 0:
return np.empty(0), 0
weight_matrix = self.problem.get_weight_matrix()
return self.problem.get_positions(self.P[i], weight_matrix)
def get_positions_plot(self, i, plot_number, count, layer=None, axes=None):
"""
Given an index `i` of an individual from a run of the algorithm, create
a matplotlib plot for the display of the positions of the vehicles and
and measurement lines, and return the positions and unsnappable count.
The `plot_number` is a number to be shown in the plot to give it another
unique identifier for the layer the individual is in, and `count` is the
number of individuals of that layer.
If the algorithm does not yet have (intermediate) results, if the
individual is not feasible, or if a `layer` is given and the individual
is not in that layer, then this method returns an empty numpy array and
zero instead.
If `axes` is given, then the plot is drawn on those matplotlib axes
instead of the current plot figure.
"""
if self.Feasible.size == 0 or not self.Feasible[i]:
return np.empty(0), 0
if layer is not None and i not in self.R[layer]:
return np.empty(0), 0
positions, unsnappable = self.get_positions(i)
if axes is None:
axes = plt.gca()
axes.cla()
obj = []
for f, name in enumerate(self.problem.get_objective_names()):
obj.append("f{} ({}): {}".format(f+1, name, self.Objectives[i][f]))
title_format = "Sensor positions for solution #{}/{} (index {})\n{}"
objectives = ", ".join(obj)
axes.set_title(title_format.format(plot_number, count, i, objectives))
# Create axes with limits that keep the network visible, make the plot
# square and display ticks and a grid at the network coordinates.
axes.set_xlabel("x coordinate")
axes.set_ylabel("y coordinate")
axes.set_xlim([-0.1, self.problem.network_size[0] + 0.1])
axes.set_ylim([-0.1, self.problem.network_size[1] + 0.1])
axes.set_aspect('equal', adjustable='box')
axes.set_xticks(range(self.problem.network_size[0] + 1))
axes.set_yticks(range(self.problem.network_size[1] + 1))
axes.grid(True)
# Make network size with padding visible
axes.add_patch(Rectangle(
(self.problem.padding[0], self.problem.padding[1]),
self.problem.network_size[0] - self.problem.padding[0] * 2,
self.problem.network_size[1] - self.problem.padding[1] * 2,
alpha=0.2, edgecolor="grey"
))
# Plot the measurement lines between locations as well as the vehicle
# sensor locations themselves as circles.
lines = [[tuple(p[:, 0]), tuple(p[:, 1])] for p in positions]
axes.plot(*itertools.chain(*lines))
axes.plot(positions[:, :, 0].flatten(), positions[:, :, 1].flatten(), 'ro')
return positions, unsnappable
def make_pareto_plot(self, axes=None):
"""
Create a plot of the Pareto front of all the feasible solutions in
the sorted nondominated layers.
If `axes` is given, then the plot is drawn on those matplotlib axes
instead of the current plot figure.
If the algorithm does not yet have (intermediate) results, then this
method does nothing.
"""
if self.Feasible.size == 0:
return
if axes is None:
axes = plt.gca()
axes.cla()
axes.set_title("Pareto front with {}, t={}".format(self.algorithm.get_name(), self.current_iteration))
names = self.problem.get_objective_names()
axes.set_xlabel("Objective 1 ({})".format(names[0]))
axes.set_ylabel("Objective 2 ({})".format(names[1]))
for Rk in self.R:
# Plot the front line of objective values for feasible individuals.
# Enable the picker events for uses in the control panel.
o1 = [self.Objectives[i][0] for i in Rk if self.Feasible[i]]
o2 = [self.Objectives[i][1] for i in Rk if self.Feasible[i]]
axes.plot(o1, o2, marker='o', picker=5)
def get_assignment(self, i, export=True):
"""
Given an index `i` of an individual from a run of the algorithm, return
the dictionary of ordered waypoint assignments to vehicles. If `export`
is `True`, then the waypoints are lists that can be exported as JSON.
Set `export` to `False` to receive `Waypoint` objects instead.
If the algorithm does not yet have (intermediate) results or if the
solution is not feasible, then this method returns an empty dictionary.
"""
positions = self.get_positions(i)[0]
if positions.size == 0:
return {}
assignment = self.problem.assigner.assign(positions, export=export)[0]
return assignment
def get_iteration_current(self):
"""
Get the current iteration number that the algorithm is at.
        This is different from `Planning_Runner.current_iteration`, which is
        the iteration number that the cached callback data comes from.
"""
return self.algorithm.t_current
def get_iteration_limit(self):
"""
Get the maximum number of iterations that the algorithm performs.
"""
return self.algorithm.t_max
def set_iteration_limit(self, t_max):
"""
Change the number of iterations that the algorithm performs from the
current value, which is by default the `iteration_limit` setting value.
This only has an effect when the algorithm has not yet run.
"""
self.algorithm.t_max = t_max
def get_population_size(self):
"""
Get the number of individuals that the algorithm keeps while evolving
them and eventually returns after running.
"""
return self.algorithm.mu
| gpl-3.0 |
rexshihaoren/scikit-learn | sklearn/linear_model/least_angle.py | 42 | 49357 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
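    Examples
    --------
    A minimal usage sketch on a small toy problem (the toy data here is only
    illustrative; numeric output is omitted because the path values depend on
    the data):
    >>> import numpy as np
    >>> from sklearn.linear_model import lars_path
    >>> X = np.array([[0., 0.], [1., 1.], [2., 4.]])
    >>> y = np.array([0., 1., 3.])
    >>> alphas, active, coefs = lars_path(X, y, method='lasso')
    >>> coefs.shape == (X.shape[1], len(alphas))
    True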
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
# even if they are in the upper part that it not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
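        # the largest absolute covariance divided by n_samples gives the value
        # of the regularization parameter alpha at this point on the path
        # (cf. the Lasso objective in the docstring)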
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # the remaining correlation with the regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
            # if there is a huge number of features, this takes 50% of the time;
            # it could be avoided if we just updated it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
        Xy : array-like, shape (n_features,) or (n_features, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
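# Illustrative note (an added sketch, not part of the original module):
# LassoLars optimizes the same objective as the coordinate-descent Lasso
# estimator, so with identical preprocessing options the two solvers are
# expected to agree up to numerical tolerance, e.g.
#
#     w_lars = LassoLars(alpha=0.1, normalize=False).fit(X, y).coef_
#     w_cd = Lasso(alpha=0.1).fit(X, y).coef_      # coordinate descent
#     # w_lars and w_cd should be close when the solution is unique.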
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X). If ``'auto'``, the Gram
        matrix is precomputed from the given X when there are more samples
        than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
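# Illustrative sketch (an added example, not part of the original module):
# this helper is meant to be called once per cross-validation fold, roughly as
#
#     alphas, active, coefs, residues = _lars_path_residues(
#         X[train], y[train], X[test], y[test], method='lasso')
#     mse_per_alpha = (residues ** 2).mean(axis=-1)
#
# where ``train`` and ``test`` are the index arrays of one fold.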
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True):
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
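# Illustrative usage sketch (an added example, not part of the original
# module):
#
#     model = LarsCV().fit(X, y)
#     model.alpha_                   # regularization strength selected by CV
#     model.coef_, model.intercept_  # final model refit on the whole data set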
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
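# Illustrative note (an added sketch, not part of the original module):
# LassoLarsCV only overrides ``method``; fitting and alpha selection are
# inherited from LarsCV, e.g.
#
#     reg = LassoLarsCV().fit(X, y)
#     reg.alpha_    # alpha minimizing the cross-validated mean squared error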
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
    explain the data well while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
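        # Information criterion, up to an additive constant:
        #     criterion(alpha) = n_samples * log(MSE(alpha)) + K * df(alpha)
        # with K = 2 for AIC and K = log(n_samples) for BIC; the alpha with
        # the smallest criterion is selected below.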
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| bsd-3-clause |
cedadev/jasmin_cis | cis/test/plot_tests/idiff.py | 3 | 2350 | #!/usr/bin/env python
# (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file was heavily influenced by a similar file in the iris package.
"""
Provides "diff-like" comparison of images.
Currently relies on matplotlib for image processing, so it is limited to the PNG format.
"""
from __future__ import (absolute_import, division, print_function)
import os.path
import shutil
import matplotlib.pyplot as plt
import matplotlib.image as mimg
import matplotlib.widgets as mwidget
def diff_viewer(expected_fname, result_fname, diff_fname):
plt.figure(figsize=(16, 16))
plt.suptitle(os.path.basename(expected_fname))
ax = plt.subplot(221)
ax.imshow(mimg.imread(expected_fname))
ax = plt.subplot(222, sharex=ax, sharey=ax)
ax.imshow(mimg.imread(result_fname))
ax = plt.subplot(223, sharex=ax, sharey=ax)
ax.imshow(mimg.imread(diff_fname))
def accept(event):
        # remove the expected result and move the most recent result into its place
print('ACCEPTED NEW FILE: %s' % (os.path.basename(expected_fname), ))
os.remove(expected_fname)
shutil.copy2(result_fname, expected_fname)
os.remove(diff_fname)
plt.close()
def reject(event):
print('REJECTED: %s' % (os.path.basename(expected_fname), ))
plt.close()
ax_accept = plt.axes([0.6, 0.35, 0.1, 0.075])
ax_reject = plt.axes([0.71, 0.35, 0.1, 0.075])
bnext = mwidget.Button(ax_accept, 'Accept change')
bnext.on_clicked(accept)
bprev = mwidget.Button(ax_reject, 'Reject')
bprev.on_clicked(reject)
plt.show()
def step_over_diffs():
import cis.test.plot_tests
image_dir = os.path.join(os.path.dirname(cis.test.plot_tests.__file__),
'reference', 'visual_tests')
diff_dir = os.path.join(os.path.dirname(cis.test.plot_tests.__file__),
'result_image_comparison')
for expected_fname in sorted(os.listdir(image_dir)):
result_path = os.path.join(diff_dir, expected_fname)
diff_path = result_path[:-4] + '-failed-diff.png'
# if the test failed, there will be a diff file
if os.path.exists(diff_path):
expected_path = os.path.join(image_dir, expected_fname)
diff_viewer(expected_path, result_path, diff_path)
if __name__ == '__main__':
step_over_diffs()
| gpl-3.0 |
466152112/scikit-learn | sklearn/utils/tests/test_testing.py | 144 | 4121 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired by numpy 1.7, with an alteration to check that
# the warning filters are reset after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
semio/ddf_utils | tests/test_transformers.py | 1 | 8168 | # -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
def test_trend_bridge():
from ddf_utils.transformer import trend_bridge
from numpy.testing import assert_almost_equal
tr1 = pd.Index(range(0, 5))
tr2 = pd.Index(range(3, 8))
s1 = pd.Series([1, 2, 3, 4, 5], index=tr1)
s2 = pd.Series([6, 7, 8, 9, 10], index=tr2)
res1 = trend_bridge(s1, s2, 3)
assert_almost_equal(res1.values.tolist(), [1, 2 + 2/3, 3 + 4/3, 6, 7, 8, 9, 10])
tr1 = pd.Index(range(0, 5))
tr2 = pd.Index(range(10, 15))
s1 = pd.Series([1, 2, 3, 4, 5], index=tr1)
s2 = pd.Series([6, 7, 8, 9, 10], index=tr2)
res2 = trend_bridge(s1, s2, 3)
assert_almost_equal(res2.values.tolist(), list(range(1, 11)))
tr1 = pd.Index(range(0, 5))
tr2 = pd.Index(range(3, 8))
s1 = pd.Series([1, 2, 3, 4, 5], index=tr1)
s2 = pd.Series([6, 7, 8, 9, 10], index=tr2)
res3 = trend_bridge(s1, s2, 10)
assert_almost_equal(res3.values.tolist(), [1.5, 3, 4.5, 6, 7, 8, 9, 10])
tr1 = [0, 1, 2, 3, 7, 8]
tr2 = [5, 6, 7, 8, 9, 10]
s1 = pd.Series(range(len(tr1)), index=tr1)
s2 = pd.Series(range(len(tr2)), index=tr2)
res4 = trend_bridge(s1, s2, 3)
assert res4.index.values.tolist() == [0, 1, 2, 3, 7, 8, 9, 10]
assert_almost_equal(res4.loc[7], s2.loc[7])
tr1 = pd.date_range('1990', '1995', freq='A')
tr2 = pd.date_range('1994', '2000', freq='A')
s1 = pd.Series([1, 2, 3, 4, 5], index=tr1)
s2 = pd.Series([6, 7, 8, 9, 10, 11], index=tr2)
trend_bridge(s1, s2, 3)
tr1 = pd.Index(range(5, 10))
tr2 = pd.Index(range(3, 8))
s1 = pd.Series([1, 2, 3, 4, 5], index=tr1)
s2 = pd.Series([6, 7, 8, 9, 10], index=tr2)
try:
trend_bridge(s1, s2, 10)
except ValueError:
pass
def test_extract_concept():
from ddf_utils.transformer import extract_concepts
df1 = pd.DataFrame([1, 3, 5], columns=['col1'], index=[2000, 2001, 2002])
df2 = pd.DataFrame([1, 3, 5], columns=['col2'], index=[1990, 1991, 1992])
res1 = extract_concepts([df1, df2])
assert 'col1' in res1.concept.values
assert 'col2' in res1.concept.values
base_df = pd.DataFrame([['col0', 'string'], ['col4', 'string']], columns=['concept', 'concept_type'])
res2 = extract_concepts([df1.reset_index(), df2.reset_index()], base=base_df)
assert 'col0' in res2.concept.values
assert 'col1' in res2.concept.values
def test_merge_keys():
from ddf_utils.transformer import merge_keys
df = pd.DataFrame([['c1', 1992, 1], ['c2', 1992, 2], ['c3', 1992, 3]], columns=['geo', 'time', 'val'])
df2 = df.copy()
df2['geo'] = df2['geo'].astype('category')
di = {'nc': ['c1', 'c2', 'c3']}
res1_1 = merge_keys(df.set_index(['geo', 'time']), di, 'geo')
res1_2 = merge_keys(df2.set_index(['geo', 'time']), di, 'geo')
assert not np.any(res1_1.index.duplicated())
assert not np.any(res1_2.index.duplicated())
assert res1_1.shape[0] == 1
assert res1_1.at[('nc', 1992), 'val'] == 6
assert res1_2.shape[0] == 1
assert res1_2.at[('nc', 1992), 'val'] == 6
assert res1_2.index.get_level_values('geo').dtype.name == 'category'
res2_1 = merge_keys(df.set_index(['geo', 'time']), di, 'geo', merged='keep')
res2_2 = merge_keys(df2.set_index(['geo', 'time']), di, 'geo', merged='keep')
assert not np.any(res2_1.index.duplicated())
assert not np.any(res2_2.index.duplicated())
assert res2_1.shape[0] == 4
assert res2_1.at[('c1', 1992), 'val'] == 1
assert res2_1.at[('nc', 1992), 'val'] == 6
assert res2_2.shape[0] == 4
assert res2_2.at[('c1', 1992), 'val'] == 1
assert res2_2.at[('nc', 1992), 'val'] == 6
assert res2_2.index.get_level_values('geo').dtype.name == 'category'
def test_split_keys():
from ddf_utils.transformer import split_keys
df = pd.DataFrame([['n0', 1991, 6], ['c1', 1992, 1], ['c2', 1992, 2], ['c3', 1992, 3]],
columns=['geo', 'time', 'val'])
df2 = df.copy()
df2['geo'] = df2['geo'].astype('category')
di = {'n0': ['c1', 'c2', 'c3']}
res1_1 = split_keys(df.set_index(['geo', 'time']), 'geo', di)
res1_2 = split_keys(df2.set_index(['geo', 'time']), 'geo', di)
assert res1_1.at[('c1', 1991), 'val'] == 1
assert res1_1.at[('c2', 1991), 'val'] == 2
assert res1_1.at[('c3', 1991), 'val'] == 3
assert res1_1.at[('c3', 1992), 'val'] == 3
assert res1_2.at[('c1', 1991), 'val'] == 1
assert res1_2.at[('c2', 1991), 'val'] == 2
assert res1_2.at[('c3', 1991), 'val'] == 3
assert res1_2.at[('c3', 1992), 'val'] == 3
assert 'n0' not in res1_1.index.get_level_values('geo')
assert 'n0' not in res1_2.index.get_level_values('geo')
assert res1_2.index.get_level_values('geo').dtype.name == 'category'
res2_1 = split_keys(df.set_index(['geo', 'time']), 'geo', di, splited='keep')
res2_2 = split_keys(df2.set_index(['geo', 'time']), 'geo', di, splited='keep')
assert res2_1.at[('c1', 1991), 'val'] == 1
assert res2_1.at[('c2', 1991), 'val'] == 2
assert res2_1.at[('c3', 1991), 'val'] == 3
assert res2_1.at[('c3', 1992), 'val'] == 3
assert res2_2.at[('c1', 1991), 'val'] == 1
assert res2_2.at[('c2', 1991), 'val'] == 2
assert res2_2.at[('c3', 1991), 'val'] == 3
assert res2_2.at[('c3', 1992), 'val'] == 3
assert res2_2.at[('n0', 1991), 'val'] == 6
assert res2_2.at[('n0', 1991), 'val'] == 6
assert res2_2.index.get_level_values('geo').dtype.name == 'category'
def test_extract_concepts():
from ddf_utils.transformer import extract_concepts
geo_data = [{'geo': 'abkh',
'is--country': 'TRUE',
'name': 'Abkhazia',
'world_4region': 'europe',
'world_6region': 'europe_central_asia'},
{'geo': 'afg',
'is--country': 'TRUE',
'name': 'Afghanistan',
'world_4region': 'asia',
'world_6region': 'south_asia'}]
datapoint_data = [{'geo': 'abkh',
'time': 1999,
'indicator': 10},
{'geo': 'afg',
'time': 2000,
'indicator': 20}]
df1 = pd.DataFrame.from_records(geo_data)
df2 = pd.DataFrame.from_records(datapoint_data)
concepts = extract_concepts([df1, df2]).set_index('concept')
assert 'country' in concepts.index
assert 'time' in concepts.index
assert concepts.loc['country', 'concept_type'] == 'entity_set'
def test_translate_column():
# from `translate_column`'s heredoc
from ddf_utils.transformer import translate_column
df = pd.DataFrame([['geo', 'Geographical places'], ['time', 'Year']],
columns=['concept', 'name'])
r1 = translate_column(df, 'concept', 'inline', {'geo': 'country', 'time': 'year'})
base_df = pd.DataFrame([['geo', 'country'], ['time', 'year']],
columns=['concept', 'alternative_name'])
r2 = translate_column(df, 'concept', 'dataframe',
{'key': 'concept', 'value': 'alternative_name'},
target_column='new_name', base_df=base_df)
df2 = pd.DataFrame([['China', 1], ['United State', 2]], columns=['geo', 'value'])
base_df2 = pd.DataFrame([['chn', 'China', 'PRC'],
['usa', 'USA', 'United State']],
columns=['geo', 'alt1', 'alt2'])
r3 = translate_column(df2, 'geo', 'dataframe',
{'key': ['alt1', 'alt2'], 'value': 'geo'},
target_column='new_geo', base_df=base_df2)
print(r3)
def test_translate_header():
from ddf_utils.transformer import translate_header
wd = os.path.dirname(__file__)
df = pd.DataFrame([[0, 1, 2]], columns=['imr_lower', 'imr_median', 'no_name'])
translate_header(df, {'no_name': 'yes_name'})
translate_header(df, os.path.join(wd, 'chef/translation_dictionaries/indicators_cme_to_sg.json'), dictionary_type='file')
try:
translate_header(df, {}, dictionary_type='something')
except ValueError:
pass
| mit |
quchunguang/test | testpy/pythonchallenge.py | 1 | 10832 | #!/usr/bin/env python2
#import string
#import urllib
#import re
#import pickle
#import os
#import zipfile
#import Image
#import bz2
#import matplotlib.pyplot as plt
#import xmlrpclib
# [official]: http://www.pythonchallenge.com
# [answers]: http://wiki.pythonchallenge.com
# [answers]: http://garethrees.org/2007/05/07/python-challenge/
# [some answers in chinese]: http://blog.csdn.net/billstone/article/details/4438785
# print '-' * 20, '#0', '-' * 20
# print 2 ** 38
#
#
# print '-' * 20, '#1', '-' * 20
# str = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj."
# tb = string.maketrans("ygqrplcbjsamweknifzduv", "aistrnedlucoygmpkhbfwx")
# print str.translate(tb)
# password = "map"
# print password.translate(tb)
#
#
# print '-' * 20, '#2', '-' * 20
# sock = urllib.urlopen("http://www.pythonchallenge.com/pc/def/ocr.html")
# source = sock.read()
# sock.close()
# source = open('ocr.html', 'rU').read()
# data = re.findall(r'<!--(.+?)-->', source, re.S)
# charList = re.findall(r'([a-zA-Z])', data[1])
# print string.join(charList, sep='')
#
#
# print '-' * 20, '#3', '-' * 20
# with open('equality.html', 'rU') as doc:
# data = re.findall(r'<!--(.+)-->', doc.read(), re.S)
# matches = re.findall(r'[^A-Z][A-Z]{3}([a-z])[A-Z]{3}[^A-Z]', data[0], re.S)
# print ''.join(matches)
#
#
# print '-' * 20, '#4', '-' * 20
# number = ['74795']
# base = 'http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing='
# for i in range(0, 400):
# url = base + number[0]
# data = urllib.urlopen(url).read()
# number = re.findall('([0-9]+)$', data)
# print data
# file.close()
#
#
# print '-' * 20, '#5', '-' * 20
# url = 'http://www.pythonchallenge.com/pc/def/banner.p'
# data = urllib.urlopen(url)
# obj = pickle.load(data)
#
# line = ''
# for row in obj:
# for col in row:
# line += col[0] * col[1]
# print line
# line = ''
#
#
# print '-' * 20, '#6', '-' * 20
# number = '90052'
# while True:
# url = '/tmp/' + number + '.txt'
# data = open(url, 'r').read()
# print data
# number = re.findall(r'([0-9]+)', data)[0]
# OR WHATEVER, USING SHELL INSTEAD,
# for f in *.txt;do cat $f;echo;done | sed '/Next nothing is [0-9]*/d'
#
# number = ['90052']
# f = zipfile.ZipFile('/home/qcg/download/channel.zip')
# print f.comment
# while True:
# filename = number[0] + '.txt'
# print f.getinfo(filename).comment,
# number = re.findall(r'([0-9]+)', f.read(filename), re.S)
#
# print '-' * 20, '#7', '-' * 20
# pic = open('oxygen.png', 'wb')
# pic.write(urllib.urlopen('http://www.pythonchallenge.com/pc/def/oxygen.png').read())
# pic.close()
#
# im = Image.open('oxygen.png')
# print im.format, im.size, im.mode
# x_max, y_max = im.size
# y = y_max / 2
# message = []
# for x in range(1, x_max, 7):
# message.append(im.getpixel((x, y))[0])
# print ''.join(map(chr, message))
#
# message2 = [105, 110, 116, 101, 103, 114, 105, 116, 121]
# print ''.join(map(chr, message2))
#
#
# print '-' * 20, '#8', '-' * 20
# un = 'BZh91AY&SYA\xaf\x82\r\x00\x00\x01\x01\x80\x02\xc0\x02\x00 \x00!\x9ah3M\x07<]\xc9\x14\xe1BA\x06\xbe\x084'
# pw = 'BZh91AY&SY\x94$|\x0e\x00\x00\x00\x81\x00\x03$ \x00!\x9ah3M\x13<]\xc9\x14\xe1BBP\x91\xf08'
# print bz2.decompress(un)
# print bz2.decompress(pw)
#
#
# print '-' * 20, '#9', '-' * 20
# first = [146, 399, 163, 403, 170, 393, 169, 391, 166, 386, 170, 381, 170, 371, 170, 355, 169, 346, 167, 335, 170, 329, 170, 320, 170,
# 310, 171, 301, 173, 290, 178, 289, 182, 287, 188, 286, 190, 286, 192, 291, 194, 296, 195, 305, 194, 307, 191, 312, 190, 316,
# 190, 321, 192, 331, 193, 338, 196, 341, 197, 346, 199, 352, 198, 360, 197, 366, 197, 373, 196, 380, 197, 383, 196, 387, 192,
# 389, 191, 392, 190, 396, 189, 400, 194, 401, 201, 402, 208, 403, 213, 402, 216, 401, 219, 397, 219, 393, 216, 390, 215, 385,
# 215, 379, 213, 373, 213, 365, 212, 360, 210, 353, 210, 347, 212, 338, 213, 329, 214, 319, 215, 311, 215, 306, 216, 296, 218,
# 290, 221, 283, 225, 282, 233, 284, 238, 287, 243, 290, 250, 291, 255, 294, 261, 293, 265, 291, 271, 291, 273, 289, 278, 287,
# 279, 285, 281, 280, 284, 278, 284, 276, 287, 277, 289, 283, 291, 286, 294, 291, 296, 295, 299, 300, 301, 304, 304, 320, 305,
# 327, 306, 332, 307, 341, 306, 349, 303, 354, 301, 364, 301, 371, 297, 375, 292, 384, 291, 386, 302, 393, 324, 391, 333, 387,
# 328, 375, 329, 367, 329, 353, 330, 341, 331, 328, 336, 319, 338, 310, 341, 304, 341, 285, 341, 278, 343, 269, 344, 262, 346,
# 259, 346, 251, 349, 259, 349, 264, 349, 273, 349, 280, 349, 288, 349, 295, 349, 298, 354, 293, 356, 286, 354, 279, 352, 268,
# 352, 257, 351, 249, 350, 234, 351, 211, 352, 197, 354, 185, 353, 171, 351, 154, 348, 147, 342, 137, 339, 132, 330, 122, 327,
# 120, 314, 116, 304, 117, 293, 118, 284, 118, 281, 122, 275, 128, 265, 129, 257, 131, 244, 133, 239, 134, 228, 136, 221, 137,
# 214, 138, 209, 135, 201, 132, 192, 130, 184, 131, 175, 129, 170, 131, 159, 134, 157, 134, 160, 130, 170, 125, 176, 114, 176,
# 102, 173, 103, 172, 108, 171, 111, 163, 115, 156, 116, 149, 117, 142, 116, 136, 115, 129, 115, 124, 115, 120, 115, 115, 117,
# 113, 120, 109, 122, 102, 122, 100, 121, 95, 121, 89, 115, 87, 110, 82, 109, 84, 118, 89, 123, 93, 129, 100, 130, 108, 132, 110,
# 133, 110, 136, 107, 138, 105, 140, 95, 138, 86, 141, 79, 149, 77, 155, 81, 162, 90, 165, 97, 167, 99, 171, 109, 171, 107, 161,
# 111, 156, 113, 170, 115, 185, 118, 208, 117, 223, 121, 239, 128, 251, 133, 259, 136, 266, 139, 276, 143, 290, 148, 310, 151,
# 332, 155, 348, 156, 353, 153, 366, 149, 379, 147, 394, 146, 399]
# second = [156, 141, 165, 135, 169, 131, 176, 130, 187, 134, 191, 140, 191, 146, 186, 150, 179, 155, 175, 157, 168, 157, 163, 157, 159,
# 157, 158, 164, 159, 175, 159, 181, 157, 191, 154, 197, 153, 205, 153, 210, 152, 212, 147, 215, 146, 218, 143, 220, 132, 220,
# 125, 217, 119, 209, 116, 196, 115, 185, 114, 172, 114, 167, 112, 161, 109, 165, 107, 170, 99, 171, 97, 167, 89, 164, 81, 162,
# 77, 155, 81, 148, 87, 140, 96, 138, 105, 141, 110, 136, 111, 126, 113, 129, 118, 117, 128, 114, 137, 115, 146, 114, 155, 115,
# 158, 121, 157, 128, 156, 134, 157, 136, 156, 136]
# plt.figure(1)
# plt.plot(first[::2], first[1::2])
# plt.plot(second[::2], second[1::2])
# plt.show()
#
# print '-' * 20, '#10', '-' * 20
# def gennext(s):
# g = []
# start = 0
# for i in range(1, len(s)):
# if s[i] != s[i - 1]:
# g.append(s[start:i])
# start = i
# g.append(s[start:])
# res = ''
# for s in g:
# res += str(len(s)) + s[0]
# return res
#
# sample_a = [1, 11, 21, 1211, 111221]
# a = []
# a.append('1')
# print 'a[0] = ' + a[0]
# for i in range(1, 31):
# a.append(gennext(a[-1]))
# print 'a[' + str(i) + '] = ' + a[i]
# print len(a[30])
#
#
# print '-' * 20, '#11', '-' * 20
# im = Image.open('/home/qcg/download/cave.jpg')
# print im.size
# for row in range(6):
# for col in range(6):
# print im.getpixel((row, col)),
# print
# odd = Image.new(im.mode, im.size)
# for row in range(0, im.size[0]):
# for col in range(0, im.size[1]):
# if (row + col) % 2 == 0:
# odd.putpixel((row, col), im.getpixel((row, col)))
# odd.show()
#
#
# im = Image.open('/home/qcg/download/cave.jpg') # method 2
# data = list(im.getdata())
# doit = False
# for i in range(0, len(data)):
# if (i + 1) % im.size[0] == 0:
# continue
# if doit:
# data[i] = ((data[i - 1][0] + data[i + 1][0]) / 2, (data[i - 1][1] + data[i + 1][1]) / 2, (data[i - 1][2] + data[i + 1][2]) / 2)
# doit = not doit
# im.putdata(data)
# im.show()
#
#
# print '-' * 20, '#12', '-' * 20
#
#
# def get_challenge(file):
# if not os.path.isfile(file):
# urllib.urlretrieve('http://www.pythonchallenge.com/pc/return/' + file, file)
# return open(file, 'rb')
#
#
# f = get_challenge('evil2.gfx')
# content = f.read()
# f.close()
# for i in xrange(5):
# f = open('%d.bin' % i, 'wb')
# f.write(content[i::5])
# f.close()
#
# print '-' * 20, '#13', '-' * 20
# proxy = xmlrpclib.ServerProxy('http://www.pythonchallenge.com/pc/phonebook.php')
# print proxy.system.listMethods()
# print proxy.system.methodHelp('phone')
# print proxy.phone('Bert')
# print '-' * 20, '#14', '-' * 20
# 100*100=(100+99+99+98)+(...
# l = [(i,i-1,i-1,i-2) for i in xrange(100,1,-2)]
#
# im = Image.open('/home/qcg/download/wire.png')
# imdata = im.getdata()
# im2 = Image.new(im.mode, (100,100))
# im2data = im2.load()
# x, y = -1, 0
# index = 0
# for r,d,l,u in l:
# for i in range(r):
# x += 1
# im2data[(x, y)] = imdata[index]
# index += 1
# for i in range(d):
# y += 1
# im2data[(x, y)] = imdata[index]
# index += 1
# for i in range(l):
# x -= 1
# im2data[(x, y)] = imdata[index]
# index += 1
# for i in range(u):
# y -= 1
# im2data[(x, y)] = imdata[index]
# index += 1
# print imdata[1]
# print im2data[(0,0)]
# im2.save('res.png')
#
# print '-' * 20, '#15', '-' * 20
# from calendar import isleap
# from datetime import date
# TUESDAY = 1
# for year in range(1006, 2000, 10):
# t = date(year, 1, 27)
# if isleap(year) and t.weekday() == TUESDAY:
# print t.isoformat()
#
# print '-' * 20, '#16', '-' * 20
# import Image
# im = Image.open('/home/qcg/download/mozart.gif')
# imdata = list(im.getdata())
# w, h = im.size
# print imdata[0:2000]
# print max(imdata)
# d = dict()
# for item in imdata:
# if item in d:
# d[item] += 1
# else:
# d[item] = 1
# for i in d.keys():
# print i, "=>", d[i]
# print sorted(d.values())
# prev = 0
# count = 1
# index = 0
# res = []
# for item in imdata:
# index += 1
# if item == prev:
# count += 1
# else:
# if count == 5 and prev == 195:
# res.append(index - 5)
# count = 1
# prev = item
# print res
# collect = []
# for y in xrange(h):
# for x in xrange(w):
# if im.getpixel((x, y)) == 195 and im.getpixel((x + 4, y)) == 195:
# collect.append((x,y))
# print collect[0:20]
#
# patten = [195, 195, 195, 195, 195, ]
# bars = []
# for i in range(len(imdata)):
# if imdata[i:i + len(patten)] == patten:
# bars.append((i%w, i/w))
#
# shift = Image.new(im.mode, (w * 2, h), 0)
# shift.palette = im.palette # share colour table
# for j in range(h):
# for i in range(w):
# shift.putpixel((i + w - bars[j][0], j), im.getpixel((i,j)))
# shift.save('shift.png')
print '-' * 20, '#17', '-' * 20
| mit |
arabenjamin/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
0asa/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 250 | 2233 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test whether a classification score is significant, a common
technique is to repeat the classification procedure after randomly
permuting the labels. The p-value is then given by the percentage of
permutation runs for which the score obtained is greater than the
classification score obtained on the original labels.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
ZhangXinNan/tensorflow | tensorflow/python/estimator/inputs/pandas_io.py | 13 | 5823 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import uuid
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
from tensorflow.python.util.tf_export import estimator_export
try:
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _get_unique_target_key(features, target_column_name):
"""Returns a key that does not exist in the input DataFrame `features`.
Args:
features: DataFrame
target_column_name: Name of the target column as a `str`
Returns:
A unique key that can be used to insert the target into
features.
"""
if target_column_name in features:
target_column_name += '_' + str(uuid.uuid4())
return target_column_name
@estimator_export('estimator.inputs.pandas_input_fn')
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=None,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object or `DataFrame`. `None` if absent.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: Integer, number of threads used for reading and enqueueing. In
order to have predicted and repeatable order of reading and enqueueing,
such as in prediction and evaluation mode, `num_threads` should be 1.
target_column: str, name to give the target column `y`. This parameter
is not used when `y` is a `DataFrame`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
ValueError: if 'shuffle' is not provided or a bool.
"""
if not HAS_PANDAS:
raise TypeError(
'pandas_input_fn should not be called without pandas installed')
if not isinstance(shuffle, bool):
raise ValueError('shuffle must be provided and explicitly set as boolean '
'(it is recommended to set it as True for training); '
'got {}'.format(shuffle))
if not isinstance(target_column, six.string_types):
raise TypeError('target_column must be a string type')
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
if isinstance(y, pd.DataFrame):
y_columns = [(column, _get_unique_target_key(x, column))
for column in list(y)]
target_column = [v for _, v in y_columns]
x[target_column] = y
else:
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions._enqueue_data( # pylint: disable=protected-access
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
if isinstance(target_column, list):
keys = [k for k, _ in y_columns]
values = [features.pop(column) for column in target_column]
target = {k: v for k, v in zip(keys, values)}
else:
target = features.pop(target_column)
return features, target
return features
return input_fn
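# --- Usage sketch (illustrative, not part of the original module) ------------
# A minimal example of building an input_fn from a small DataFrame. Only the
# construction step is shown; in a real program the returned callable would be
# handed to an Estimator, e.g. estimator.train(input_fn=...). The column names
# and values below are made up for illustration.
if __name__ == '__main__' and HAS_PANDAS:
  example_features = pd.DataFrame({'age': [21, 35, 48, 62],
                                   'height': [1.60, 1.82, 1.75, 1.68]})
  example_labels = pd.Series([0, 1, 1, 0])
  example_input_fn = pandas_input_fn(
      x=example_features,
      y=example_labels,
      batch_size=2,
      num_epochs=1,
      shuffle=True)
  print('Built input_fn: %r' % example_input_fn)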
| apache-2.0 |
Obus/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in Gaussian noise of shape
    (n_samples, n_features) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
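# --- Illustrative sketch (not part of the original module) -------------------
def _demo_infer_dimension():
    """Illustrative sketch, not part of the original module.
    Shows how ``_infer_dimension_`` recovers the number of informative
    directions from the eigenvalue spectrum of noisy low-rank data. The
    synthetic rank, noise level and sizes below are arbitrary choices.
    """
    rng = np.random.RandomState(0)
    n_samples, n_features, rank = 500, 10, 3
    # low-rank signal plus weak isotropic noise
    X = np.dot(rng.randn(n_samples, rank), rng.randn(rank, n_features))
    X += 0.1 * rng.randn(n_samples, n_features)
    X -= X.mean(axis=0)
    _, S, _ = linalg.svd(X, full_matrices=False)
    spectrum = (S ** 2) / n_samples
    # with this well-separated spectrum the MLE should return the planted rank
    return _infer_dimension_(spectrum, n_samples, n_features)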
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
        http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
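# --- Illustrative sketch (not part of the original module) -------------------
# Whitening rescales the projected data so that each retained component has
# unit variance. The quick check below uses random data with very different
# per-feature scales; shapes and the seed are arbitrary. Run the module
# directly to see it.
if __name__ == '__main__':
    _rng = np.random.RandomState(42)
    _X = _rng.randn(200, 5) * np.array([5., 3., 1., .5, .1])
    _X_white = PCA(n_components=3, whiten=True).fit_transform(_X)
    # each column should have variance close to 1
    print("variances after whitening: %r" % (_X_white.var(axis=0),))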
| bsd-3-clause |
matthiasplappert/motion_classification | scripts/plot_hmm_hyperparams.py | 1 | 1308 | import csv
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
rcParams['figure.figsize'] = 12, 5
rcParams['text.usetex']=True
#rcParams['text.latex.unicode']=True
topologies = {}
max_y = None
with open('/Users/matze/Desktop/hmm/hyperparams/_results.csv', 'rb') as f:
reader = csv.reader(f, delimiter=';')
for row_idx, row in enumerate(reader):
if row_idx == 0:
continue
measure = float(row[2])
n_states, topology = row[3].split(', ')
if max_y is None or max_y < measure:
max_y = measure
if topology not in topologies:
topologies[topology] = ([], [])
topologies[topology][0].append(int(n_states))
topologies[topology][1].append(measure)
data = []
data.append(topologies['full'])
data.append(topologies['left-to-right-full'])
data.append(topologies['left-to-right-1'])
data.append(topologies['left-to-right-2'])
# plot from the ordered list built above so that the curves are drawn in the
# same order as the legend labels below (dict iteration order is arbitrary)
for xs, ys in data:
plt.plot(xs, ys)
plt.legend(['fully-connected', 'left-to-right without $\Delta$ constraint', 'left-to-right with $\Delta = 1$', 'left-to-right with $\Delta = 2$'], loc='upper right')
plt.xticks(range(3, 21))
plt.xlabel('number of states')
plt.ylabel('score')
plt.grid(True)
plt.axis([3, 20, 25000, 60000])
plt.show()
| mit |
altairpearl/scikit-learn | sklearn/neighbors/classification.py | 4 | 14363 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
       but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
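# --- Illustrative sketch (not part of the original module) -------------------
# With weights='distance' the votes in predict_proba are scaled by the inverse
# of the neighbor distances, so close neighbors dominate the estimated class
# probabilities. The toy data below is made up; run the module directly to
# compare the two weighting schemes.
if __name__ == '__main__':
    _X_toy = [[0], [1], [2], [3]]
    _y_toy = [0, 0, 1, 1]
    for _weights in ('uniform', 'distance'):
        _clf = KNeighborsClassifier(n_neighbors=3, weights=_weights)
        _clf.fit(_X_toy, _y_toy)
        print("%s -> %r" % (_weights, _clf.predict_proba([[0.9]])))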
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
        Label given to outlier samples (samples with no neighbors within
        the given radius).
        If set to None, a ValueError is raised when an outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights[inliers])],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
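# --- Illustrative sketch (not part of the original module) -------------------
# Queries with no training point inside ``radius`` are outliers: without
# ``outlier_label`` they raise a ValueError, with it they receive that label.
# The tiny dataset below is made up; run the module directly to see it.
if __name__ == '__main__':
    _X_toy = [[0], [1], [2], [3]]
    _y_toy = [0, 0, 1, 1]
    _rclf = RadiusNeighborsClassifier(radius=0.5, outlier_label=-1)
    _rclf.fit(_X_toy, _y_toy)
    # the query at 10.0 has no neighbor within 0.5, so it is labelled -1
    print(_rclf.predict([[1.1], [10.0]]))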
| bsd-3-clause |
thilbern/scikit-learn | examples/tree/plot_tree_regression.py | 40 | 1470 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additive noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_1.fit(X, y)
clf_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
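# Illustrative follow-up (a sketch, not part of the original example): the
# overfitting described above can also be seen numerically by comparing the
# training error of the two trees; the deeper tree tracks the noisy training
# points much more closely.
from sklearn.metrics import mean_squared_error
print("train MSE, max_depth=2: %.4f" % mean_squared_error(y, clf_1.predict(X)))
print("train MSE, max_depth=5: %.4f" % mean_squared_error(y, clf_2.predict(X)))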
| bsd-3-clause |
yanlend/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
pombredanne/http-repo.gem5.org-gem5- | util/stats/output.py | 90 | 7981 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from chart import ChartOptions
class StatOutput(ChartOptions):
def __init__(self, jobfile, info, stat=None):
super(StatOutput, self).__init__()
self.jobfile = jobfile
self.stat = stat
self.invert = False
self.info = info
def display(self, name, printmode = 'G'):
import info
        for job in self.jobfile.jobs():
            value = self.info.get(job, self.stat)
            if value is None:
                return
            if not isinstance(value, list):
                value = [ value ]
            if self.invert:
                for i,val in enumerate(value):
                    if val != 0.0:
                        value[i] = 1 / val
            # choose the format once the values are known; the previous code
            # referenced 'value' before it was assigned
            if printmode == 'G':
                valformat = '%g'
            elif printmode != 'F' and max(value) > 1e6:
                valformat = '%0.5e'
            else:
                valformat = '%f'
            valstring = ', '.join([ valformat % val for val in value ])
            print '%-50s %s' % (job.name + ':', valstring)
def graph(self, name, graphdir, proxy=None):
from os.path import expanduser, isdir, join as joinpath
from barchart import BarChart
from matplotlib.numerix import Float, array, zeros
import os, re, urllib
from jobfile import crossproduct
confgroups = self.jobfile.groups()
ngroups = len(confgroups)
skiplist = [ False ] * ngroups
groupopts = []
baropts = []
groups = []
for i,group in enumerate(confgroups):
if group.flags.graph_group:
groupopts.append(group.subopts())
skiplist[i] = True
elif group.flags.graph_bars:
baropts.append(group.subopts())
skiplist[i] = True
else:
groups.append(group)
has_group = bool(groupopts)
if has_group:
groupopts = [ group for group in crossproduct(groupopts) ]
else:
groupopts = [ None ]
if baropts:
baropts = [ bar for bar in crossproduct(baropts) ]
else:
raise AttributeError, 'No group selected for graph bars'
directory = expanduser(graphdir)
if not isdir(directory):
os.mkdir(directory)
html = file(joinpath(directory, '%s.html' % name), 'w')
print >>html, '<html>'
print >>html, '<title>Graphs for %s</title>' % name
print >>html, '<body>'
html.flush()
for options in self.jobfile.options(groups):
chart = BarChart(self)
data = [ [ None ] * len(baropts) for i in xrange(len(groupopts)) ]
enabled = False
stacked = 0
for g,gopt in enumerate(groupopts):
for b,bopt in enumerate(baropts):
if gopt is None:
gopt = []
job = self.jobfile.job(options + gopt + bopt)
if not job:
continue
if proxy:
import db
proxy.dict['system'] = self.info[job.system]
val = self.info.get(job, self.stat)
if val is None:
print 'stat "%s" for job "%s" not found' % \
(self.stat, job)
if isinstance(val, (list, tuple)):
if len(val) == 1:
val = val[0]
else:
stacked = len(val)
data[g][b] = val
if stacked == 0:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
if data[i][j] is None:
data[i][j] = 0.0
else:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
val = data[i][j]
if val is None:
data[i][j] = [ 0.0 ] * stacked
elif len(val) != stacked:
raise ValueError, "some stats stacked, some not"
data = array(data)
if data.sum() == 0:
continue
dim = len(data.shape)
x = data.shape[0]
xkeep = [ i for i in xrange(x) if data[i].sum() != 0 ]
y = data.shape[1]
ykeep = [ i for i in xrange(y) if data[:,i].sum() != 0 ]
data = data.take(xkeep, axis=0)
data = data.take(ykeep, axis=1)
if not has_group:
data = data.take([ 0 ], axis=0)
chart.data = data
bopts = [ baropts[i] for i in ykeep ]
bdescs = [ ' '.join([o.desc for o in opt]) for opt in bopts]
if has_group:
gopts = [ groupopts[i] for i in xkeep ]
gdescs = [ ' '.join([o.desc for o in opt]) for opt in gopts]
if chart.legend is None:
if stacked:
try:
chart.legend = self.info.rcategories
except:
chart.legend = [ str(i) for i in xrange(stacked) ]
else:
chart.legend = bdescs
if chart.xticks is None:
if has_group:
chart.xticks = gdescs
else:
chart.xticks = []
chart.graph()
names = [ opt.name for opt in options ]
descs = [ opt.desc for opt in options ]
if names[0] == 'run':
names = names[1:]
descs = descs[1:]
basename = '%s-%s' % (name, ':'.join(names))
desc = ' '.join(descs)
pngname = '%s.png' % basename
            psname = '%s.ps' % re.sub(':', '-', basename)
            epsname = '%s.eps' % re.sub(':', '-', basename)
chart.savefig(joinpath(directory, pngname))
chart.savefig(joinpath(directory, epsname))
chart.savefig(joinpath(directory, psname))
html_name = urllib.quote(pngname)
print >>html, '''%s<br><img src="%s"><br>''' % (desc, html_name)
html.flush()
print >>html, '</body>'
print >>html, '</html>'
html.close()
| bsd-3-clause |
msis/pyIbex | examples/test_SIVIA.py | 1 | 1628 | #!/usr/bin/env python
# import matplotlib as mpl
# mpl.use('pgf')
from pyIbex import *
# import glob
# from time import sleep
# mpl.rcParams['text.latex.unicode']=True
# mpl.rcParams['text.usetex']=True
# mpl.rcParams['pgf.texsystem'] = 'pdflatex'
def contract(X, ctc, res_list):
X0 = IntervalVector(X)
ctc.contract(X)
if( X != X0):
diff = X0.diff(X)
for b in diff:
res_list.append(b)
X0 = IntervalVector(2, Interval(-5,5))
stack = [X0]
res_y = []
res_in = []
res_out = []
f = Function("x", "y", "x^2+y^2")
R2 = Interval(4).inflate(0.1)
CtcIn = CtcIn(f, R2)
CtcNotIn = CtcNotIn(f, R2)
lf = LargestFirst(0.01)
while len(stack) > 0:
X = stack.pop()
contract(X, CtcNotIn, res_in)
contract(X, CtcIn, res_out)
if( X.max_diam() < 0.01):
res_y.append(X)
# drawbox(ax, X, "y")
elif (X.is_empty() == False):
(X1, X2) = lf.bisect(X)
stack.append(X1)
stack.append(X2)
# print(len(res_y),len(res_in), len(res_out))
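# Illustrative follow-up (a sketch, not part of the original script): the boxes
# collected by SIVIA give guaranteed bounds on the area of the ring
# {3.9 <= x^2 + y^2 <= 4.1}, whose exact area is 0.2 * pi. Boxes in res_in are
# proved to lie inside the ring and boxes in res_y are undetermined, so
# inner <= true area <= inner + boundary. Only the lb()/ub() accessors already
# appearing in the (commented) drawbox helper below are assumed here.
def box_area(b):
    return (b[0].ub() - b[0].lb()) * (b[1].ub() - b[1].lb())
inner_area = sum(box_area(b) for b in res_in)
boundary_area = sum(box_area(b) for b in res_y)
print("ring area lower bound:", inner_area)
print("ring area upper bound:", inner_area + boundary_area)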
# import matplotlib.pyplot as plt
# from matplotlib.patches import Rectangle, Circle, Polygon
# import numpy as np
# from numpy import cos,sin
# def drawbox(ax,data,cin,facecolor='b', fill=False):
# x = [data[0].lb(), data[0].ub()]
# y = [data[1].lb(), data[1].ub()]
# wx = (x[1] - x[0])
# wy = (y[1] - y[0])
# x0 = 0.5*(x[0] + x[1] - wx)
# y0 = 0.5*(y[0] + y[1] - wy)
# ax.add_patch(Rectangle( (x0,y0),wx,wy,fill=fill,edgecolor=cin, facecolor=facecolor))
# fig = plt.figure()
# ax = fig.add_subplot(111)
# for b in res_out:
# drawbox(ax, b, "b")
# for b in res_in:
# drawbox(ax, b, "r")
# for b in res_y:
# drawbox(ax, b, "y")
# plt.axis([-3, 3, -3, 3])
# plt.show() | lgpl-3.0 |
wzbozon/scikit-learn | sklearn/metrics/ranking.py | 44 | 25479 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
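# --- Illustrative sketch (not part of the original module) -------------------
# ``auc`` is simply the trapezoidal rule applied to the given points; for an
# already increasing x it coincides with numpy.trapz. The points below are
# made up.
def _demo_auc_trapezoid():
    x = np.array([0.0, 0.5, 1.0])
    y = np.array([0.0, 0.75, 1.0])
    # both expressions evaluate to 0.625 for these points
    return auc(x, y), np.trapz(y, x)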
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (array_equal(classes, [0, 1]) or
array_equal(classes, [-1, 1]) or
array_equal(classes, [0]) or
array_equal(classes, [-1]) or
array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
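# --- Illustrative sketch (not part of the original module) -------------------
# On a tiny problem the cumulative counts returned by _binary_clf_curve can be
# checked by hand: walking the thresholds from high to low, tps counts the
# positives scored at or above each threshold and fps the negatives.
def _demo_binary_clf_curve():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
    # thresholds -> [0.8, 0.4, 0.35, 0.1]
    # tps        -> [1, 1, 2, 2]   (positives with score >= threshold)
    # fps        -> [0, 1, 1, 2]   (negatives with score >= threshold)
    return fps, tps, thresholds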
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
    y axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
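# Illustrative sketch, not part of the scikit-learn API: a hypothetical helper
# showing how the returned curve can be used to pick an operating threshold,
# here the largest threshold that still reaches full recall on a toy input.
def _demo_threshold_for_recall():
    y_true = np.array([0, 0, 1, 1])
    y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    precision, recall, thresholds = precision_recall_curve(y_true, y_scores)
    # recall[i] corresponds to thresholds[i] for all but the appended last entry
    idx = np.where(recall[:-1] >= 1.0)[0][-1]
    return thresholds[idx]  # 0.35 for this toy example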
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
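# Illustrative sketch, not part of the scikit-learn API: a hypothetical helper
# showing how the returned (fpr, tpr) pair can be integrated into an AUC value
# with the trapezoidal rule; roc_auc_score is the supported way to do this.
def _demo_roc_auc_from_curve():
    y = np.array([1, 1, 2, 2])
    scores = np.array([0.1, 0.4, 0.35, 0.8])
    fpr, tpr, _ = roc_curve(y, scores, pos_label=2)
    return np.trapz(tpr, fpr)  # 0.75 for this toy example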
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted arrays and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or all labels are irrelevant, the
            # score is 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
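# Illustrative sketch, not part of the scikit-learn API: a hypothetical helper
# illustrating coverage_error on a toy multilabel problem.
def _demo_coverage_error():
    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
    # Sample 1: its true label has the 2nd highest score -> coverage 2.
    # Sample 2: its true label has the 3rd highest score -> coverage 3.
    return coverage_error(y_true, y_score)  # (2 + 3) / 2 == 2.5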
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
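# Illustrative sketch, not part of the scikit-learn API: a hypothetical helper
# illustrating label_ranking_loss on the same toy multilabel problem.
def _demo_label_ranking_loss():
    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
    # Sample 1: 1 of the 1 * 2 relevant/irrelevant pairs is wrongly ordered.
    # Sample 2: 2 of the 1 * 2 relevant/irrelevant pairs are wrongly ordered.
    return label_ranking_loss(y_true, y_score)  # (0.5 + 1.0) / 2 == 0.75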
| bsd-3-clause |
najmacherrad/master_thesis | DATA/1KGP/Modif_fasta_neutral.py | 1 | 7849 | # TRANSFORM fasta sequence after neutral 1KGP mutation
# /Memoire/Neutral_mutation
# coding=utf-8
from numpy import *
import petl as etl
import operator
import pandas as pd
import re
import csv
#-------------------------------------------------------------------------------
# Creation of file with fasta sequence mutant
#-------------------------------------------------------------------------------
df = pd.read_csv('neutral_variants.csv',',') #176 columns
df2 = df[['id','hgvs_protein','gene_symbol','protein_pos','protein_length','gene_ensembl','transcript_ensembl','transcript_uniprot_id','dbsnp_id','snpeff_effect','Protein_position_Biomart','Protein_ref_allele','Protein_alt_allele']]
df2.to_csv('neutral_variants_modif.csv', index=False)
#-------------------------------------------------------------------------------
V = open('neutral_variants_modif.csv','r')
c1 = csv.reader(V,delimiter=',')
variants = list(c1) #8'670
variants.pop(0)
file_fasta_wt = 'DIDAgenes_wt.txt' #136 sequences
file_fasta_mut = 'DIDAgenes_neutralmut.txt' # OUTPUT file
fasta = open(file_fasta_wt,'r')
mutant = open(file_fasta_mut,'w')
lines = fasta.readlines()
sequences = {}  # all fasta sequences without the 1st line >
listID = {}  # to keep the first line of fasta sequence with all information
for i in range(0,len(lines)):
if lines[i].startswith('>'):
splitline = lines[i].split('|')
accessorID = splitline[1]
listID[accessorID] = lines[i].split(' ')[0]
sequences[accessorID] = ''
else:
sequences[accessorID] = sequences[accessorID] + lines[i].rstrip('\n').rstrip('*')
#++++++++++++++++++++++++++++++++++++++++
# MODIFICATION of fasta sequence after mutation
#++++++++++++++++++++++++++++++++++++++++
compteur = 0
pb = open('ProblemsFile.txt','w')
for v in variants:
for ID in sequences.keys():
if ID.split('-')[0] == v[7]:
#---------------------------------------------------------------
if v[9]=='deletion' and v[10]!='0':
aa1 = v[11]
longueur = len(aa1)
position1 = int(v[10])
new_aa1 = v[12]
s1 = list(sequences[ID])
if new_aa1=='-0':
if ''.join(s1[(position1 - 1):(position1-1+longueur)])==aa1 :
s1[(position1 - 1):(position1-1+longueur)] = ''
seq1=''.join(s1)
mutant.write(listID[ID] + '|variant_' + v[8] + '\n')
for i in range(0,len(seq1),60):
mutant.write(seq1[i:min(len(seq1),i+60)] + '\n')
else:
pb.write('Position problem in '+ ID +' (' + v[8] + ') position ' + str(position1) + ' with effect deletion'+ '\n')
else:
if ''.join(s1[(position1 - 1):(position1-1+longueur)])==aa1 :
s1[(position1 - 1):(position1-1+longueur)] = new_aa1
seq1=''.join(s1)
mutant.write(listID[ID] + '|variant_' + v[8] + '\n')
for i in range(0,len(seq1),60):
mutant.write(seq1[i:min(len(seq1),i+60)] + '\n')
else:
pb.write('Position problem in '+ ID +' (' + v[8] + ') position ' + str(position1) + ' with effect deletion'+ '\n')
#---------------------------------------------------------------
elif v[9]=='insertion' and v[10]!='0':
aa3 = v[11]
position3 = int(v[10])
new_aa3= v[12]
s3 = list(sequences[ID])
if s3[position3 - 1]==aa3 :
s3[position3-1] = new_aa3 + s3[position3-1]
seq3=''.join(s3)
mutant.write(listID[ID] + '|variant_' + v[8] + '\n')
for i in range(0,len(seq3),60):
mutant.write(seq3[i:min(len(seq3),i+60)] + '\n')
else:
pb.write('Position problem in '+ ID +' (' + v[8] + ') position '+ str(position3) + ' with effect insertion'+ '\n')
#-----------------------------------------------------------------
elif v[9]=='missense' and v[10]!='0' and v[12]!='*':
#-------------
if v[1].startswith('p.Ter'):
position5 = int(v[10])
new_aa5=v[12]
s5 = list(sequences[ID])
s5.append(new_aa5)
seq5=''.join(s5)
mutant.write(listID[ID] + '|variant_' + v[8] + '\n')
for i in range(0,len(seq5),60):
mutant.write(seq5[i:min(len(seq5),i+60)] + '\n')
#-------------
else:
aa4 = v[11]
position4 = int(v[10])
new_aa4=v[12]
s4 = list(sequences[ID])
if position4 < len(s4):
if s4[position4 - 1]==aa4:
s4[position4 - 1] = new_aa4
seq4=''.join(s4)
mutant.write(listID[ID] + '|variant_' + v[8] + '\n')
for i in range(0,len(seq4),60):
mutant.write(seq4[i:min(len(seq4),i+60)] + '\n')
else:
pb.write('Position problem in '+ ID +' (' + v[8] + ') position '+ str(position4) + ' with effect missense'+ '\n')
else:
pb.write('Isoform problem in '+ ID +' (' + v[8] + ') position '+ str(position4) + ' with effect missense'+ '\n')
#---------------------------------------------------------------
elif v[9]=='synonymous' and v[10]!='0' :
if v[12]!='*':
aa6 = v[11]
position6 = int(v[10])
s6 = list(sequences[ID])
if position6 < len(s6):
if s6[position6 - 1]==aa6 :
seq6 = sequences[ID]
mutant.write(listID[ID] + '|variant_' + v[8] + '\n')
for i in range(0,len(seq6),60):
mutant.write(seq6[i:min(len(seq6),i+60)] + '\n')
compteur = compteur + 1
else:
pb.write('Position problem in '+ ID +' (' + v[8] + ') position '+ str(position6) + ' with effect synonymous'+ '\n')
else:
pb.write('Isoform problem in '+ ID +' (' + v[8] + ') position '+ str(position6) + ' with effect synonymous'+ '\n')
elif v[11] == '*' and v[12]=='*':
aa6 = v[11]
position6 = int(v[10])
s6 = list(sequences[ID])
if position6 < len(s6):
if s6[position6 - 1]==aa6 :
seq6 = sequences[ID]
mutant.write(listID[ID] + '|variant_' + v[8] + '\n')
for i in range(0,len(seq6),60):
mutant.write(seq6[i:min(len(seq6),i+60)] + '\n')
compteur = compteur + 1
else:
pb.write('Position problem in '+ ID +' (' + v[8] + ') position '+ str(position6) + ' with effect synonymous'+ '\n')
else:
pb.write('Isoform problem in '+ ID +' (' + v[8] + ') position '+ str(position6) + ' with effect synonymous'+ '\n')
fasta.close()
V.close()
mutant.close()
pb.close()
| mit |
imaculate/scikit-learn | examples/linear_model/plot_ransac.py | 73 | 1859 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
lw = 2
plt.scatter(X[inlier_mask], y[inlier_mask], color='yellowgreen', marker='.',
label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask], color='gold', marker='.',
label='Outliers')
plt.plot(line_X, line_y, color='navy', linestyle='-', linewidth=lw,
label='Linear regressor')
plt.plot(line_X, line_y_ransac, color='cornflowerblue', linestyle='-',
linewidth=lw, label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
Schwittleymani/ECO | src/python/pdf2text/visualize_statistics.py | 2 | 2073 | import sys
import os
import argparse
import matplotlib.pyplot as plt
import numpy as np
def process_arguments(args):
parser = argparse.ArgumentParser(description='visualize the statistics')
parser.add_argument('--statistics_path', action='store', help='path to the file that holds the statistics')
params = vars(parser.parse_args(args))
return params
if __name__ == '__main__':
params = process_arguments(sys.argv[1:])
path = params['statistics_path']
lines = open(path, 'r').readlines()
labels = lines[0].split(';')[1:]
cmap = plt.get_cmap('jet')
colors = cmap(np.linspace(0, 1, len(labels)))
values = [0] * len(labels)
plt.rcParams['font.size'] = 9.0
for stat in lines[1:]:
sizes = list(map(int, stat.split(';')[1:]))
index = 0
for v in sizes:
values[index] += v
index += 1
def make_autopct(values):
def my_autopct(pct):
total = sum(values)
val = int(round(pct*total/100.0))
return '{p:.2f}% ({v:d})'.format(p=pct,v=val)
return my_autopct
patches, texts, autotexts = plt.pie(values, # data
labels=labels, # slice labels
colors=colors, # array of colours
pctdistance=0.7,
autopct=make_autopct(values), # print the values inside the wedges
shadow=True, # enable shadow
startangle=70 # starting angle
)
for text in texts:
text.set_fontsize(6)
for text in autotexts:
text.set_fontsize(6)
for index, item in enumerate(labels):
val = ': ' + str(values[index]) + ' ( ' + str(round((values[index] / float(sum(values))) * 100.0, 2)) + ' % )'
labels[index] += val
plt.legend(labels, fontsize=5, loc="best")
plt.axis('equal')
# getting the filename of the statistics, and saving the statistics image with the same name
head, tail = os.path.split(path)
tail = tail.replace('.txt', '.png')
print('Writing plot to ' + tail)
plt.savefig(tail, dpi=300) | apache-2.0 |
emon10005/scikit-image | doc/examples/plot_radon_transform.py | 17 | 8432 | """
===============
Radon transform
===============
In computed tomography, the tomography reconstruction problem is to obtain
a tomographic slice image from a set of projections [1]_. A projection is
formed by drawing a set of parallel rays through the 2D object of interest,
assigning the integral of the object's contrast along each ray to a single
pixel in the projection. A single projection of a 2D object is one dimensional.
To enable computed tomography reconstruction of the object, several projections
must be acquired, each of them corresponding to a different angle between the
rays with respect to the object. A collection of projections at several angles
is called a sinogram, which is a linear transform of the original image.
The inverse Radon transform is used in computed tomography to reconstruct
a 2D image from the measured projections (the sinogram). A practical, exact
implementation of the inverse Radon transform does not exist, but there are
several good approximate algorithms available.
As the inverse Radon transform reconstructs the object from a set of
projections, the (forward) Radon transform can be used to simulate a
tomography experiment.
This script performs the Radon transform to simulate a tomography experiment
and reconstructs the input image based on the resulting sinogram formed by
the simulation. Two methods for performing the inverse Radon transform
and reconstructing the original image are compared: The Filtered Back
Projection (FBP) and the Simultaneous Algebraic Reconstruction
Technique (SART).
For further information on tomographic reconstruction, see
- AC Kak, M Slaney, "Principles of Computerized Tomographic Imaging",
http://www.slaney.org/pct/pct-toc.html
- http://en.wikipedia.org/wiki/Radon_transform
The forward transform
=====================
As our original image, we will use the Shepp-Logan phantom. When calculating
the Radon transform, we need to decide how many projection angles we wish
to use. As a rule of thumb, the number of projections should be about the
same as the number of pixels there are across the object (to see why this
is so, consider how many unknown pixel values must be determined in the
reconstruction process and compare this to the number of measurements
provided by the projections), and we follow that rule here. Below is the
original image and its Radon transform, often known as its _sinogram_:
"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage import data_dir
from skimage.transform import radon, rescale
image = imread(data_dir + "/phantom.png", as_grey=True)
image = rescale(image, scale=0.4)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
ax1.set_title("Original")
ax1.imshow(image, cmap=plt.cm.Greys_r)
theta = np.linspace(0., 180., max(image.shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=True)
ax2.set_title("Radon transform\n(Sinogram)")
ax2.set_xlabel("Projection angle (deg)")
ax2.set_ylabel("Projection position (pixels)")
ax2.imshow(sinogram, cmap=plt.cm.Greys_r,
extent=(0, 180, 0, sinogram.shape[0]), aspect='auto')
fig.subplots_adjust(hspace=0.4, wspace=0.5)
plt.show()
"""
.. image:: PLOT2RST.current_figure
Reconstruction with the Filtered Back Projection (FBP)
======================================================
The mathematical foundation of the filtered back projection is the Fourier
slice theorem [2]_. It uses Fourier transform of the projection and
interpolation in Fourier space to obtain the 2D Fourier transform of the image,
which is then inverted to form the reconstructed image. The filtered back
projection is among the fastest methods of performing the inverse Radon
transform. The only tunable parameter for the FBP is the filter, which is
applied to the Fourier transformed projections. It may be used to suppress
high frequency noise in the reconstruction. ``skimage`` provides a few
different options for the filter.
"""
from skimage.transform import iradon
reconstruction_fbp = iradon(sinogram, theta=theta, circle=True)
error = reconstruction_fbp - image
print('FBP rms reconstruction error: %.3g' % np.sqrt(np.mean(error**2)))
imkwargs = dict(vmin=-0.2, vmax=0.2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
ax1.set_title("Reconstruction\nFiltered back projection")
ax1.imshow(reconstruction_fbp, cmap=plt.cm.Greys_r)
ax2.set_title("Reconstruction error\nFiltered back projection")
ax2.imshow(reconstruction_fbp - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
"""
.. image:: PLOT2RST.current_figure
Reconstruction with the Simultaneous Algebraic Reconstruction Technique
=======================================================================
Algebraic reconstruction techniques for tomography are based on a
straightforward idea: for a pixelated image the value of a single ray in a
particular projection is simply a sum of all the pixels the ray passes through
on its way through the object. This is a way of expressing the forward Radon
transform. The inverse Radon transform can then be formulated as a (large) set
of linear equations. As each ray passes through a small fraction of the pixels
in the image, this set of equations is sparse, allowing iterative solvers for
sparse linear systems to tackle the system of equations. One iterative method
has been particularly popular, namely Kaczmarz' method [3]_, which has the
property that the solution will approach a least-squares solution of the
equation set.
The combination of the formulation of the reconstruction problem as a set
of linear equations and an iterative solver makes algebraic techniques
relatively flexible, hence some forms of prior knowledge can be incorporated
with relative ease.
``skimage`` provides one of the more popular variations of the algebraic
reconstruction techniques: the Simultaneous Algebraic Reconstruction Technique
(SART) [1]_ [4]_. It uses Kaczmarz' method [3]_ as the iterative solver. A good
reconstruction is normally obtained in a single iteration, making the method
computationally effective. Running one or more extra iterations will normally
improve the reconstruction of sharp, high frequency features and reduce the
mean squared error at the expense of increased high frequency noise (the user
will need to decide on what number of iterations is best suited to the problem
at hand). The implementation in ``skimage`` allows prior information of the
form of a lower and upper threshold on the reconstructed values to be supplied
to the reconstruction.
"""
from skimage.transform import iradon_sart
reconstruction_sart = iradon_sart(sinogram, theta=theta)
error = reconstruction_sart - image
print('SART (1 iteration) rms reconstruction error: %.3g'
% np.sqrt(np.mean(error**2)))
fig, ax = plt.subplots(2, 2, figsize=(8, 8.5))
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.set_title("Reconstruction\nSART")
ax1.imshow(reconstruction_sart, cmap=plt.cm.Greys_r)
ax2.set_title("Reconstruction error\nSART")
ax2.imshow(reconstruction_sart - image, cmap=plt.cm.Greys_r, **imkwargs)
# Run a second iteration of SART by supplying the reconstruction
# from the first iteration as an initial estimate
reconstruction_sart2 = iradon_sart(sinogram, theta=theta,
image=reconstruction_sart)
error = reconstruction_sart2 - image
print('SART (2 iterations) rms reconstruction error: %.3g'
% np.sqrt(np.mean(error**2)))
ax3.set_title("Reconstruction\nSART, 2 iterations")
ax3.imshow(reconstruction_sart2, cmap=plt.cm.Greys_r)
ax4.set_title("Reconstruction error\nSART, 2 iterations")
ax4.imshow(reconstruction_sart2 - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
"""
.. image:: PLOT2RST.current_figure
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic Imaging",
IEEE Press 1988. http://www.slaney.org/pct/pct-toc.html
.. [2] Wikipedia, Radon transform,
http://en.wikipedia.org/wiki/Radon_transform#Relationship_with_the_Fourier_transform
.. [3] S Kaczmarz, "Angenaeherte Aufloesung von Systemen linearer
Gleichungen", Bulletin International de l'Academie Polonaise des
Sciences et des Lettres 35 pp 355--357 (1937)
.. [4] AH Andersen, AC Kak, "Simultaneous algebraic reconstruction technique
(SART): a superior implementation of the ART algorithm", Ultrasonic
Imaging 6 pp 81--94 (1984)
"""
| bsd-3-clause |
jirivrany/MLND-capstone | src/preprocess.py | 1 | 3119 | # coding: utf-8
"""
@author Jiri Vrany
@license MIT
"""
import glob
import numpy as np
import tensorflow as tf
import os
from scipy.signal import wiener
from sklearn import preprocessing
def transform_row(row):
row = row.replace("[", "")
row = row.replace("],", "")
return row
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def create_label(filename):
"""
create label from the file name
"""
keys = {"Sphere": 0, "Vertical": 1, "Horizontal": 2}
names = filename.split(os.sep)
names = names[4].split()
return keys[names[0].strip()]
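# Illustrative sketch, not part of the original script: a hypothetical demo of
# create_label(); the class name is read from the 5th path component, matching
# the glob patterns used in create_training() on POSIX path separators.
def _demo_create_label():
    return create_label('../data/gravimetrie/random_zeronoise/Sphere 001.txt')  # -> 0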
def sanitize_name(filename):
filename = filename.replace(".txt", "")
filename = filename.replace(" ", "_")
filename = filename.split(os.sep)[-1]
return filename
def create_files(files, output_dir, testset=False):
for i, fname in enumerate(files):
with open(fname) as fdata:
data = fdata.readlines()
data = [transform_row(data[0]).split(",") for row in data]
data = np.array(data)
data = data.astype(np.float64)
# filter the noise
data = wiener(data)
# normalize data
data = preprocessing.normalize(data, norm='l2')
# flip the data
data_s1 = np.fliplr(data)
data_s2 = np.flipud(data)
filename = os.path.join(
output_dir, sanitize_name(fname) + '.tfrecords')
filename_s1 = os.path.join(
output_dir, sanitize_name(fname) + 'split_left.tfrecords')
filename_s2 = os.path.join(
output_dir, sanitize_name(fname) + 'split_up.tfrecords')
if not i % 500:
print(i, 'writing', filename)
if testset:
pairs = ((filename, data),)
else:
pairs = ((filename, data), (filename_s1, data_s1), (filename_s2, data_s2))
for fn, dn in pairs:
writer = tf.python_io.TFRecordWriter(fn)
features = tf.train.Features(feature={
'height': _int64_feature(100),
'width': _int64_feature(100),
'depth': _int64_feature(1),
'label': _int64_feature(create_label(fname)),
'image': _bytes_feature(dn.tostring())
})
example = tf.train.Example(features=features)
writer.write(example.SerializeToString())
writer.close()
def create_training():
files = glob.glob('../data/gravimetrie/random_zeronoise/*.txt')
output_dir = '../data/gravimetrie/random_tf_normalized'
create_files(files, output_dir)
def create_validation():
files = glob.glob('../data/gravimetrie/validation_set/*.txt')
output_dir = '../data/gravimetrie/validation_set_tf'
create_files(files, output_dir, True)
if __name__ == "__main__":
create_validation()
| apache-2.0 |
StructuralNeurobiologyLab/SyConn | syconn/reps/connectivity_helper.py | 1 | 25105 | # -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max-Planck-Institute of Neurobiology, Munich, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import time
from logging import Logger
from typing import Optional, Union
import matplotlib
import networkx as nx
import numpy as np
from . import log_reps
from .. import global_params
from ..handler.prediction import int2str_converter
from ..reps import segmentation
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
from matplotlib import gridspec
from collections import defaultdict
from scipy import ndimage
def cs_id_to_partner_ids_vec(cs_ids):
    """Decode bit-shift-based `cs`/`syn` IDs into the two partner cell IDs
    per ID (inverse of :func:`~cs_id_to_partner_inverse`)."""
    sv_ids = np.right_shift(cs_ids, 32)
    sv_ids = np.concatenate((sv_ids[:, None], (cs_ids - np.left_shift(sv_ids, 32))[:, None]),
                            axis=1)
    return sv_ids
def cs_id_to_partner_inverse(partner_ids: Union[np.ndarray, list]) -> int:
"""
Input permutation invariant transformation to bit-shift-based ID, which is used for `syn` and `cs`
:class:`~syconn.reps.segmentation.SegmentationObject`.
Args:
partner_ids: :class:`~syconn.reps.super_segmentation_object.SuperSegmentationObject` IDs.
Returns:
Contact site or synapse fragment ID.
"""
partner_ids = np.sort(partner_ids).astype(np.uint32)
return (partner_ids[0] << 32) + partner_ids[1]
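# Illustrative sketch, not part of SyConn: a hypothetical decode demo for the
# bit-shift ID scheme described above. An ID packed as
# (smaller_partner << 32) + larger_partner is split back into its two partner
# IDs; the sample IDs are invented.
def _demo_cs_id_partner_decode():
    cs_id = (7 << 32) + 42
    return cs_id_to_partner_ids_vec(np.array([cs_id], dtype=np.int64))  # -> [[7, 42]]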
def connectivity_to_nx_graph(cd_dict):
"""
Creates a directed networkx graph with attributes from the
stored raw connectivity data.
Parameters
----------
cd_dict : dict
Returns
-------
"""
nxg = nx.DiGraph()
start = time.time()
for idx in range(0, len(cd_dict['ids'])):
# find out which one is pre and which one is post
# 1 indicates pre, i.e. identified as axon by the classifier
if cd_dict['neuron_partner_ax_0'][idx] == 1:
u = cd_dict['ssv_partner_0'][idx]
v = cd_dict['ssv_partner_1'][idx]
else:
v = cd_dict['ssv_partner_0'][idx]
u = cd_dict['ssv_partner_1'][idx]
nxg.add_edge(u, v)
# for each synapse create edge with attributes
log_reps.debug('Done with graph ({1} nodes) construction, took {0}'.format(
time.time() - start, nxg.number_of_nodes()))
return nxg
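# Illustrative sketch, not part of SyConn: a hypothetical, minimal connectivity
# dict showing the field layout consumed above and how the resulting directed
# graph can be queried; all values are invented.
def _demo_connectivity_graph():
    cd_dict = {'ids': [0, 1],
               'neuron_partner_ax_0': [1, 0],  # 1 == partner 0 is the axon
               'ssv_partner_0': [10, 10],
               'ssv_partner_1': [20, 30]}
    nxg = connectivity_to_nx_graph(cd_dict)
    return nxg.out_degree(10), nxg.in_degree(10)  # -> (1, 1)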
def load_cached_data_dict(thresh_syn_prob=None, axodend_only=True, wd=None,
syn_version=None):
"""
Loads all cached data from a contact site segmentation dataset into a
dictionary for further processing.
Parameters
----------
wd : str
syn_version : str
thresh_syn_prob : float
All synapses below `thresh_syn_prob` will be filtered.
    axodend_only: If True, returns only axo-dendritic synapses; otherwise
        all synapses are returned.
Returns
-------
"""
if wd is None:
wd = global_params.config.working_dir
if thresh_syn_prob is None:
thresh_syn_prob = global_params.config['cell_objects']['thresh_synssv_proba']
start = time.time()
csd = segmentation.SegmentationDataset(obj_type='syn_ssv', working_dir=wd,
version=syn_version)
cd_dict = dict()
cd_dict['ids'] = csd.load_numpy_data('id')
# in um2, overlap of cs and sj
cd_dict['syn_size'] = \
csd.load_numpy_data('mesh_area') / 2 # as used in export_matrix
cd_dict['synaptivity_proba'] = \
csd.load_numpy_data('syn_prob')
# -1 for inhibitory, +1 for excitatory
cd_dict['syn_sign'] = \
csd.load_numpy_data('syn_sign').astype(np.int32)
cd_dict['coord_x'] = \
csd.load_numpy_data('rep_coord')[:, 0].astype(np.int32)
cd_dict['coord_y'] = \
csd.load_numpy_data('rep_coord')[:, 1].astype(np.int32)
cd_dict['coord_z'] = \
csd.load_numpy_data('rep_coord')[:, 2].astype(np.int32)
cd_dict['ssv_partner_0'] = \
csd.load_numpy_data('neuron_partners')[:, 0].astype(np.int32)
cd_dict['ssv_partner_1'] = \
csd.load_numpy_data('neuron_partners')[:, 1].astype(np.int32)
cd_dict['neuron_partner_ax_0'] = \
csd.load_numpy_data('partner_axoness')[:, 0].astype(np.int32)
cd_dict['neuron_partner_ax_1'] = \
csd.load_numpy_data('partner_axoness')[:, 1].astype(np.int32)
cd_dict['neuron_partner_ct_0'] = \
csd.load_numpy_data('partner_celltypes')[:, 0].astype(np.int32)
cd_dict['neuron_partner_ct_1'] = \
csd.load_numpy_data('partner_celltypes')[:, 1].astype(np.int32)
cd_dict['neuron_partner_sp_0'] = \
csd.load_numpy_data('partner_spiness')[:, 0].astype(np.int32)
cd_dict['neuron_partner_sp_1'] = \
csd.load_numpy_data('partner_spiness')[:, 1].astype(np.int32)
log_reps.debug('Getting {1} objects took: {0}'.format(time.time() - start,
len(csd.ids)))
idx_filter = cd_dict['synaptivity_proba'] >= thresh_syn_prob
cd_dict['neuron_partner_ax_0'][cd_dict['neuron_partner_ax_0'] == 3] = 1
cd_dict['neuron_partner_ax_0'][cd_dict['neuron_partner_ax_0'] == 4] = 1
cd_dict['neuron_partner_ax_1'][cd_dict['neuron_partner_ax_1'] == 3] = 1
cd_dict['neuron_partner_ax_1'][cd_dict['neuron_partner_ax_1'] == 4] = 1
    n_syns = np.sum(idx_filter)
    n_syns_after_axdend = n_syns
    if axodend_only:
        idx_filter = idx_filter & ((cd_dict['neuron_partner_ax_0'] +
                                    cd_dict['neuron_partner_ax_1']) == 1)
        n_syns_after_axdend = np.sum(idx_filter)
    for k, v in cd_dict.items():
        cd_dict[k] = v[idx_filter]
    log_reps.debug('Finished conn. dictionary with {} synaptic '
                   'objects. {} above prob. threshold {}. '
                   '{} syns after axo-dend. filter '
                   '(changes only if "axodend_only=True").'
                   ''.format(len(idx_filter), n_syns, thresh_syn_prob,
                             n_syns_after_axdend))
return cd_dict
def generate_wiring_array(log: Optional[Logger] = None, **load_cached_data_dict_kwargs):
"""
Creates a 2D wiring array with quadratic shape (#cells x #cells) sorted by
cell type. X-axis: post-synaptic partners, y: pre-synaptic partners.
Assumes label 1 in 'partner_axoness' represents axon compartments. Does not
support ``axodend_only=False``!
Required for :func:`~plot_wiring` and :func:`plot_cumul_wiring`.
Notes:
* Work-in-progress.
Args:
log: Logger.
**load_cached_data_dict_kwargs: See :func:`~load_cached_data_dict`
Returns:
The wiring diagram as a 2D array.
"""
    if not load_cached_data_dict_kwargs.get('axodend_only', True):
        raise ValueError("'axodend_only=False' is not supported!")
cd_dict = load_cached_data_dict(**load_cached_data_dict_kwargs)
if log is None:
log = log_reps
# analyze scope of underlying data
all_ssv_ids = set(cd_dict['ssv_partner_0'].tolist()).union(set(cd_dict['ssv_partner_1']))
n_cells = len(all_ssv_ids)
wiring = np.zeros((n_cells, n_cells))
celltypes = np.unique([cd_dict['neuron_partner_ct_0'], cd_dict['neuron_partner_ct_1']])
ssvs_flattened = []
borders = []
np.random.seed(0)
# create list of cells used for celltype-sorted x and y axis
for ct in celltypes:
l0 = cd_dict['ssv_partner_0'][cd_dict['neuron_partner_ct_0'] == ct]
l1 = cd_dict['ssv_partner_1'][cd_dict['neuron_partner_ct_1'] == ct]
curr_ct_ssvs = np.unique(np.concatenate([l0, l1]))
# destroy sorting of `np.unique`
np.random.shuffle(curr_ct_ssvs)
curr_ct_ssvs = curr_ct_ssvs.tolist()
ssvs_flattened += curr_ct_ssvs
borders.append(len(curr_ct_ssvs))
borders = np.cumsum(borders)
assert borders[-1] == len(wiring)
# sum per cell-pair synaptic connections multiplied by synaptic sign (-1 or 1)
cumul_syn_dc = defaultdict(list)
# synapse size: in um2, mesh area of the overlap between cs and sj divided by 2
for ii, syn_size in enumerate(cd_dict['syn_size']):
cell_pair = (cd_dict['ssv_partner_0'][ii], cd_dict['ssv_partner_1'][ii])
if cd_dict['neuron_partner_ax_1'][ii] == 1:
cell_pair = cell_pair[::-1]
elif cd_dict['neuron_partner_ax_0'][ii] == 1:
pass
else:
raise ValueError('No axon prediction found within synapse.')
cumul_syn_dc[cell_pair].append(syn_size * cd_dict['syn_sign'][ii])
cumul_syn_dc = dict(cumul_syn_dc)
rev_map = {ssvs_flattened[ii]: ii for ii in range(n_cells)}
for pre_id, post_id in cumul_syn_dc:
pre_ix = rev_map[pre_id]
post_ix = rev_map[post_id]
syns = cumul_syn_dc[(pre_id, post_id)]
syns_pos = np.sum([syn for syn in syns if syn > 0])
syns_neg = np.abs(np.sum([syn for syn in syns if syn < 0]))
sign = -1 if syns_neg > syns_pos else 1
wiring[post_ix, pre_ix] = sign * (syns_pos + syns_neg)
ct_borders = [(int2str_converter(celltypes[ii], gt_type='ctgt_v2'), borders[ii]) for ii in range(len(celltypes))]
    log.info(f'Found the following cell types (label, cumulative cell count in wiring diagram): {ct_borders}')
return wiring, borders[:-1]
def plot_wiring(path, wiring, den_borders, ax_borders, cumul=False, log: Optional[Logger] = None):
"""Plot type sorted connectivity matrix. Saved in folder given by `path`.
Notes:
* Work-in-progress.
* `wiring` is generated by :func:`~generate_wiring_array`.
Parameters
----------
path: Path to directory.
wiring : Quadratic 2D array of size #cells x #cells, x-axis: dendrite partners, y: axon partners.
den_borders: Cell type borders on post synaptic site. Used to split the connectivity matrix into quadrants.
ax_borders: Cell type borders on pre synaptic site. Used to split the connectivity matrix into quadrants.
cumul: Accumulate quadrant values.
log: Logger.
"""
if log is None:
log = log_reps
if cumul:
entry_width = 1
else:
entry_width = int(np.max([20 / 29297 * wiring.shape[0], 1]))
intensity_plot = np.array(wiring)
intensity_plot_neg = intensity_plot < 0
intensity_plot_pos = intensity_plot > 0
borders = [0] + list(ax_borders) + [intensity_plot.shape[1]]
for i_border in range(1, len(borders)):
start = borders[i_border - 1]
end = borders[i_border]
sign = np.sum(intensity_plot_pos[:, start: end]) - \
np.sum(intensity_plot_neg[:, start: end]) > 0
# adapt either negative or positive elements according to the majority vote
if sign:
intensity_plot[:, start: end][intensity_plot[:, start: end] < 0] *= -1
else:
intensity_plot[:, start: end][intensity_plot[:, start: end] > 0] *= -1
intensity_plot_neg = intensity_plot[intensity_plot < 0]
intensity_plot_pos = intensity_plot[intensity_plot > 0]
int_cut_pos = np.mean(intensity_plot_pos) + np.std(intensity_plot_pos)
if np.isnan(int_cut_pos):
int_cut_pos = 1
int_cut_neg = np.abs(np.mean(intensity_plot_neg)) + np.std(intensity_plot_neg)
if np.isnan(int_cut_neg):
int_cut_neg = 1
# balance max values
intensity_plot[intensity_plot > 0] = intensity_plot[intensity_plot > 0] / int_cut_pos
intensity_plot[intensity_plot < 0] = intensity_plot[intensity_plot < 0] / int_cut_neg
log.info(f'1-sigma cut-off for excitatory cells: {int_cut_pos}')
log.info(f'1-sigma cut-off for inhibitory cells: {int_cut_neg}')
log.debug(f'Initial wiring diagram shape: {intensity_plot.shape}')
if not cumul:
# TODO: refactor, this becomes slow for shapes > (10k, 10k)
log_reps.debug(f'Increasing the matrix entries from pixels of edge length 1 to {entry_width} .')
for k, b in enumerate(den_borders):
b += k * entry_width
intensity_plot = np.concatenate(
(intensity_plot[:b, :], np.zeros((entry_width, intensity_plot.shape[1])),
intensity_plot[b:, :]), axis=0)
for k, b in enumerate(ax_borders):
b += k * entry_width
intensity_plot = np.concatenate(
(intensity_plot[:, :b], np.zeros((intensity_plot.shape[0], entry_width)),
intensity_plot[:, b:]), axis=1)
log.debug(f'Wiring diagram shape after adding hline columns and rows: {intensity_plot.shape}')
else:
log.debug(f'Wiring diagram shape after adding hline columns and rows: {intensity_plot.shape}')
# TODO: becomes slow for large entry_width
bin_intensity_plot = intensity_plot != 0
bin_intensity_plot = bin_intensity_plot.astype(np.float32)
intensity_plot = ndimage.convolve(intensity_plot, np.ones((entry_width, entry_width)))
bin_intensity_plot = ndimage.convolve(bin_intensity_plot, np.ones((entry_width, entry_width)))
intensity_plot /= bin_intensity_plot
matplotlib.rcParams.update({'font.size': 14})
fig = plt.figure()
    # Lay out the matrix axes and the colorbar axes side by side
gs = gridspec.GridSpec(1, 2, width_ratios=[20, 1])
gs.update(wspace=0.05, hspace=0.08)
ax = plt.subplot(gs[0, 0], frameon=False)
cax = ax.matshow(intensity_plot.transpose((1, 0)),
cmap=diverge_map(),
extent=[0, intensity_plot.shape[0], intensity_plot.shape[1], 0],
interpolation="none", vmin=-1,
vmax=1)
ax.set_xlabel('Post', fontsize=18)
ax.set_ylabel('Pre', fontsize=18)
ax.set_xlim(0, intensity_plot.shape[0])
ax.set_ylim(0, intensity_plot.shape[1])
plt.grid(False)
plt.axis('off')
if cumul:
for k, b in enumerate(den_borders):
plt.axvline(b, color='k', lw=0.5, snap=False,
antialiased=True)
for k, b in enumerate(ax_borders):
plt.axhline(b, color='k', lw=0.5, snap=False,
antialiased=True)
else:
for k, b in enumerate(den_borders):
b += k * entry_width
plt.axvline(b + 0.5, color='k', lw=0.5, snap=False,
antialiased=True)
for k, b in enumerate(ax_borders):
b += k * entry_width
plt.axhline(b + 0.5, color='k', lw=0.5, snap=False,
antialiased=True)
cbar_ax = plt.subplot(gs[0, 1])
cbar_ax.yaxis.set_ticks_position('none')
cb = fig.colorbar(cax, cax=cbar_ax, ticks=[])
plt.close()
if cumul:
mat_name = "/matrix_cum_%d_%d" % (int(int_cut_neg * 100000), int(int_cut_pos * 100000))
fig.savefig(path + mat_name + '.png', dpi=600)
else:
mat_name = "/matrix_%d_%d_%d" % (
intensity_plot.shape[0], int(int_cut_neg * 100000), int(int_cut_pos * 100000))
fig.savefig(path + mat_name + '.png', dpi=600)
# TODO: refine summary log
sum_str = f"Cut value negative: {int_cut_neg}\n"
sum_str += f"Cut value positive: {int_cut_pos}\n"
sum_str += f"Post-synaptic borders: {den_borders}\n"
sum_str += f"Pre-synaptic borders: {ax_borders}\n"
with open(path + mat_name + '.txt', 'w') as f:
f.write(sum_str)
def plot_cumul_wiring(path, wiring, borders, min_cumul_synarea=0, log: Optional[Logger] = None):
"""
    Synaptic area between cell type pairs. Synaptic areas are summed and then
    divided by the number of cell pairs to compute the average cumulated
    synaptic area between each pair of cell types.
Notes:
* Work-in-progress.
* `wiring` is generated by :func:`~generate_wiring_array`.
Args:
path:
wiring:
borders:
min_cumul_synarea:
log: Logger.
"""
cumul_matrix = np.zeros([len(borders) + 1, len(borders) + 1])
borders = [0] + list(borders) + [wiring.shape[1]]
for i_ax_border in range(1, len(borders)):
for i_de_border in range(1, len(borders)):
ax_start = borders[i_ax_border - 1]
ax_end = borders[i_ax_border]
de_start = borders[i_de_border - 1]
de_end = borders[i_de_border]
cumul = wiring[de_start: de_end, ax_start: ax_end].flatten()
pos = np.sum(cumul[cumul > 0])
neg = np.abs(np.sum(cumul[cumul < 0]))
sign = -1 if neg > pos else 1
cumul = sign * (pos + neg)
if np.abs(cumul) < min_cumul_synarea:
cumul = 0
else:
# convert to density (average cumul. synaptic area between cell pairs)
cumul /= (ax_end - ax_start) * (de_end - de_start)
cumul_matrix[i_de_border - 1, i_ax_border - 1] = cumul
plot_wiring(path, cumul_matrix, list(range(1, len(borders) + 1)), list(range(1, len(borders) + 1)), cumul=True,
log=log)
def make_colormap(seq):
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mcolors.LinearSegmentedColormap('CustomMap', cdict)
def diverge_map(high=(239 / 255., 65 / 255., 50 / 255.),
low=(39 / 255., 184 / 255., 148 / 255.)):
"""Low and high are colors that will be used for the two
ends of the spectrum. they can be either color strings
or rgb color tuples
"""
c = mcolors.ColorConverter().to_rgb
if isinstance(low, str): low = c(low)
if isinstance(high, str): high = c(high)
return make_colormap([low, c('white'), 0.5, c('white'), high])
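# Illustrative sketch, not part of SyConn: minimal usage of the colormap
# helpers above. Builds the default diverging map (as used by plot_wiring) and
# samples it at both ends and at the white midpoint.
def _demo_diverge_map():
    cmap = diverge_map()
    return cmap(0.0), cmap(0.5), cmap(1.0)  # RGBA tuples: low, white, high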
def connectivity_hists_j0251(proba_thresh_syn: float = 0.8, proba_thresh_celltype: float = None,
r=(0.05, 2)):
"""
Args:
proba_thresh_syn: Synapse probability. Filters synapses below threshold.
proba_thresh_celltype: Cell type probability. Filters cells below threshold.
r: Range of synapse mesh area (um^2).
Returns:
"""
from syconn.handler.prediction import int2str_converter, certainty_estimate
from syconn.reps.segmentation import SegmentationDataset
from syconn.reps.super_segmentation import SuperSegmentationDataset
from scipy.special import softmax
import pandas as pd
import tqdm
import os
import seaborn as sns
def ctclass_converter(x): return int2str_converter(x, gt_type='ctgt_j0251_v2')
target_dir = f'/wholebrain/scratch/pschuber/tmp/thresh{int(proba_thresh_syn * 100)}/'
os.makedirs(target_dir, exist_ok=True)
nclass = 11
log_scale = True
plot_n_celltypes = 5
palette = sns.color_palette('dark', n_colors=nclass)
palette = {ctclass_converter(kk): palette[kk] for kk in range(nclass)}
sd_syn_ssv = SegmentationDataset('syn_ssv')
if proba_thresh_celltype is not None:
ssd = SuperSegmentationDataset()
ct_probas = [certainty_estimate(proba) for proba in tqdm.tqdm(ssd.load_cached_data('celltype_cnn_e3_probas'),
desc='Cells')]
ct_proba_lookup = {cellid: ct_probas[k] for k, cellid in enumerate(ssd.ssv_ids)}
del ct_probas
ax = sd_syn_ssv.load_cached_data('partner_axoness')
ct = sd_syn_ssv.load_cached_data('partner_celltypes')
area = sd_syn_ssv.load_cached_data('mesh_area')
# size = sd_syn_ssv.load_cached_data('size')
# syn_sign = sd_syn_ssv.load_cached_data('syn_sign')
# area *= syn_sign
partners = sd_syn_ssv.load_cached_data('neuron_partners')
proba = sd_syn_ssv.load_cached_data('syn_prob')
m = (proba >= proba_thresh_syn) & (area >= r[0]) & (area <= r[1])
    print(f'Found {np.sum(m)} synapses after filtering with probability threshold {proba_thresh_syn} and '
f'size filter (min/max [um^2]: {r}).')
ax[(ax == 3) | (ax == 4)] = 1 # set boutons to axon class
ax[(ax == 5) | (ax == 6)] = 0 # set spine head and neck to dendrite class
m = m & (np.sum(ax, axis=1) == 1) # get all axo-dendritic synapses
print(f'Found {np.sum(m)} synapses after filtering non axo-dendritic ones.')
ct = ct[m]
ax = ax[m]
area = area[m]
# size = size[m]
partners = partners[m]
if log_scale:
area = np.log10(area)
r = np.log(r)
ct_receiving = {ctclass_converter(k): {ctclass_converter(kk): [] for kk in range(nclass)} for k in range(nclass)}
ct_targets = {ctclass_converter(k): {ctclass_converter(kk): [] for kk in range(nclass)} for k in range(nclass)}
for ix in tqdm.tqdm(range(area.shape[0]), total=area.shape[0], desc='Synapses'):
post_ix, pre_ix = np.argsort(ax[ix])
if proba_thresh_celltype is not None:
post_cell_id, pre_cell_id = partners[ix][post_ix], partners[ix][pre_ix]
celltype_probas = np.array([ct_proba_lookup[post_cell_id], ct_proba_lookup[pre_cell_id]])
if np.any(celltype_probas < proba_thresh_celltype):
continue
syn_ct = ct[ix]
pre_ct = ctclass_converter(syn_ct[pre_ix])
post_ct = ctclass_converter(syn_ct[post_ix])
ct_receiving[post_ct][pre_ct].append(area[ix])
ct_targets[pre_ct][post_ct].append(area[ix])
for ct_label in tqdm.tqdm(map(ctclass_converter, range(nclass)), total=nclass):
data_rec = ct_receiving[ct_label]
sizes = np.argsort([len(v) for v in data_rec.values()])[::-1]
highest_cts = np.array(list(data_rec.keys()))[sizes][:plot_n_celltypes]
df = pd.DataFrame(data={'mesh_area': np.concatenate([data_rec[k] for k in highest_cts]),
'cell_type': np.concatenate([[k]*len(data_rec[k]) for k in highest_cts])})
create_kde(f'{target_dir}/incoming{ct_label}.png', df, palette=palette, r=r)
df = pd.DataFrame(data={'mesh_area[um^2]': [np.sum(10**np.array(data_rec[k])) for k in data_rec],
'n_synapses': [len(data_rec[k]) for k in data_rec],
'cell_type': [k for k in data_rec]})
df.to_csv(f'{target_dir}/incoming{ct_label}_sum.csv')
data_out = ct_targets[ct_label]
sizes = np.argsort([len(v) for v in data_out.values()])[::-1]
highest_cts = np.array(list(data_out.keys()))[sizes][:plot_n_celltypes]
df = pd.DataFrame(data={'mesh_area': np.concatenate([data_out[k] for k in highest_cts]),
'cell_type': np.concatenate([[k]*len(data_out[k]) for k in highest_cts])})
create_kde(f'{target_dir}/outgoing{ct_label}.png', df, palette=palette, r=r)
df = pd.DataFrame(data={'mesh_area[um^2]': [np.sum(10**np.array(data_out[k])) for k in data_out],
'n_synapses': [len(data_out[k]) for k in data_out],
'cell_type': [k for k in data_out]})
df.to_csv(f'{target_dir}/outgoing{ct_label}_sum.csv')
def create_kde(dest_p, qs, ls=20, legend=False, r=None, **kwargs):
"""
Parameters
----------
dest_p :
qs :
r :
legend :
ls :
Returns
-------
"""
    r = np.array(r) if r is not None else None
import seaborn as sns
# fig, ax = plt.subplots()
plt.figure()
# ax = sns.swarmplot(data=qs, clip_on=False, **kwargs)
# fill=True, kde=True, kde_kws=dict(bw_adjust=0.5), common_bins=False,
sns.displot(data=qs, x="mesh_area", hue="cell_type",
# kind="kde", bw_adjust=0.5,
fill=True, kde=True, kde_kws=dict(bw_adjust=0.5), common_bins=True,
edgecolor='none', multiple="layer", common_norm=False, stat='density',
**kwargs) # , common_norm=False
if r is not None:
xmin, xmax = plt.xlim()
if xmin > r[0]:
r[0] = xmin
if xmax < r[1]:
r[1] = xmax
plt.xlim(r)
# if not legend:
# plt.gca().legend().set_visible(False)
# # sns.set_style("ticks", {"xtick.major.size": 20, "ytick.major.size": 20})
# ax.tick_params(axis='x', which='major', labelsize=ls, direction='out',
# length=4, width=3, right="off", top="off", pad=10)
# ax.tick_params(axis='y', which='major', labelsize=ls, direction='out',
# length=4, width=3, right="off", top="off", pad=10)
# ax.tick_params(axis='x', which='minor', labelsize=ls, direction='out',
# length=4, width=3, right="off", top="off", pad=10)
# ax.tick_params(axis='y', which='minor', labelsize=ls, direction='out',
# length=4, width=3, right="off", top="off", pad=10)
# ax.spines['left'].set_linewidth(3)
# ax.spines['bottom'].set_linewidth(3)
# ax.spines['right'].set_visible(False)
# ax.spines['top'].set_visible(False)
plt.savefig(dest_p, dpi=300)
qs.to_csv(dest_p[:-4] + ".csv")
plt.close('all')
| gpl-2.0 |
eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs/Supp_pandemic_analyses.py | 1 | 2656 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 8/12/14
###Function: analyze pandemic dynamics when considering different baseline periods - between two pandemic peaks, time series after November 2009, 2008-09 baseline for entire pandemic period
###Import data: SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop.csv
###Command Line: python Supp_pandemic_analyses.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions as fxn
### data structures ###
### functions ###
### data files ###
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
sl = fxn.gp_seasonlabels
colvec = fxn.gp_colors
wklab = fxn.gp_weeklabels
norm = fxn.gp_normweeks
fs = 24
fssml = 16
BL = fxn.gp_pandemicbaseline
tricolor = fxn.gp_agecolors
### program ###
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
for BL_txt, col in zip(BL, tricolor):
d_zOR_pan = fxn.week_zOR_processing_pandemic(d_wk, d_OR, BL_txt)
d_incid53ls_pan, d_OR53ls_pan, d_zOR53ls_pan = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR_pan)
plotvals = d_zOR53ls_pan[9] + d_zOR53ls_pan[10]
plt.plot(plotvals, marker = 'o', color = col, label = BL_txt, linewidth = 2)
plt.plot(d_zOR53ls[9], marker = 'o', color = 'k', linewidth=2, label='2008-09 season')
plt.xticks(range(2*len(wklab))[::5], wklab[::5] + wklab[::5])
plt.xlabel('Week Number (2008-09 and 2009-10)', fontsize=fs)
plt.ylabel('zOR', fontsize=fs)
plt.legend(loc='lower left')
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_time_pandemic.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
| mit |
jkettleb/iris | docs/iris/example_code/Oceanography/atlantic_profiles.py | 6 | 3398 | """
Oceanographic profiles and T-S diagrams
=======================================
This example demonstrates how to plot vertical profiles of different
variables in the same axes, and how to make a scatter plot of two
variables. There is an oceanographic theme but the same techniques are
equally applicable to atmospheric or other kinds of data.
The data used are profiles of potential temperature and salinity in the
Equatorial and South Atlantic, output from an ocean model.
The y-axis of the first plot produced will be automatically inverted due to the
presence of the attribute positive=down on the depth coordinate. This means
depth values intuitively increase downward on the y-axis.
"""
import iris
import iris.iterate
import iris.plot as iplt
import matplotlib.pyplot as plt
def main():
# Load the gridded temperature and salinity data.
fname = iris.sample_data_path('atlantic_profiles.nc')
cubes = iris.load(fname)
theta, = cubes.extract('sea_water_potential_temperature')
salinity, = cubes.extract('sea_water_practical_salinity')
# Extract profiles of temperature and salinity from a particular point in
# the southern portion of the domain, and limit the depth of the profile
# to 1000m.
lon_cons = iris.Constraint(longitude=330.5)
lat_cons = iris.Constraint(latitude=lambda l: -10 < l < -9)
depth_cons = iris.Constraint(depth=lambda d: d <= 1000)
theta_1000m = theta.extract(depth_cons & lon_cons & lat_cons)
salinity_1000m = salinity.extract(depth_cons & lon_cons & lat_cons)
# Plot these profiles on the same set of axes. In each case we call plot
# with two arguments, the cube followed by the depth coordinate. Putting
# them in this order places the depth coordinate on the y-axis.
# The first plot is in the default axes. We'll use the same color for the
# curve and its axes/tick labels.
fig = plt.figure(figsize=(5, 6))
temperature_color = (.3, .4, .5)
ax1 = plt.gca()
iplt.plot(theta_1000m, theta_1000m.coord('depth'), linewidth=2,
color=temperature_color, alpha=.75)
ax1.set_xlabel('Potential Temperature / K', color=temperature_color)
ax1.set_ylabel('Depth / m')
for ticklabel in ax1.get_xticklabels():
ticklabel.set_color(temperature_color)
# To plot salinity in the same axes we use twiny(). We'll use a different
# color to identify salinity.
salinity_color = (.6, .1, .15)
ax2 = plt.gca().twiny()
iplt.plot(salinity_1000m, salinity_1000m.coord('depth'), linewidth=2,
color=salinity_color, alpha=.75)
ax2.set_xlabel('Salinity / PSU', color=salinity_color)
for ticklabel in ax2.get_xticklabels():
ticklabel.set_color(salinity_color)
plt.tight_layout()
iplt.show()
# Now plot a T-S diagram using scatter. We'll use all the profiles here,
# and each point will be coloured according to its depth.
plt.figure(figsize=(6, 6))
depth_values = theta.coord('depth').points
for s, t in iris.iterate.izip(salinity, theta, coords='depth'):
iplt.scatter(s, t, c=depth_values, marker='+', cmap='RdYlBu_r')
ax = plt.gca()
ax.set_xlabel('Salinity / PSU')
ax.set_ylabel('Potential Temperature / K')
cb = plt.colorbar(orientation='horizontal')
cb.set_label('Depth / m')
plt.tight_layout()
iplt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/font_manager.py | 6 | 47238 | """
A module for finding, managing, and using fonts across platforms.
This module provides a single :class:`FontManager` instance that can
be shared across backends and platforms. The :func:`findfont`
function returns the best TrueType (TTF) font file in the local or
system font path that matches the specified :class:`FontProperties`
instance. The :class:`FontManager` also handles Adobe Font Metrics
(AFM) font files for use by the PostScript backend.
The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
Future versions may implement the Level 2 or 2.1 specifications.
Experimental support is included for using `fontconfig` on Unix
variant platforms (Linux, OS X, Solaris). To enable it, set the
constant ``USE_FONTCONFIG`` in this file to ``True``. Fontconfig has
the advantage that it is the standard way to look up fonts on X11
platforms, so if a font is installed, it is much more likely to be
found.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import cPickle as pickle
"""
KNOWN ISSUES
- documentation
- font variant is untested
- font stretch is incomplete
- font size is incomplete
- font size_adjust is incomplete
- default font algorithm needs improvement and testing
- setWeights function needs improvement
- 'light' is an invalid weight value, remove it.
- update_fonts not implemented
Authors : John Hunter <[email protected]>
Paul Barrett <[email protected]>
Michael Droettboom <[email protected]>
Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005)
License : matplotlib license (PSF compatible)
The font directory code is from ttfquery,
see license/LICENSE_TTFQUERY.
"""
from collections import Iterable
import json
import os
import sys
from threading import Timer
import warnings
import matplotlib
from matplotlib import afm, cbook, ft2font, rcParams, get_cachedir
from matplotlib.cbook import is_string_like
from matplotlib.compat import subprocess
from matplotlib.fontconfig_pattern import (
parse_fontconfig_pattern, generate_fontconfig_pattern)
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
USE_FONTCONFIG = False
verbose = matplotlib.verbose
font_scalings = {
'xx-small' : 0.579,
'x-small' : 0.694,
'small' : 0.833,
'medium' : 1.0,
'large' : 1.200,
'x-large' : 1.440,
'xx-large' : 1.728,
'larger' : 1.2,
'smaller' : 0.833,
None : 1.0}
stretch_dict = {
'ultra-condensed' : 100,
'extra-condensed' : 200,
'condensed' : 300,
'semi-condensed' : 400,
'normal' : 500,
'semi-expanded' : 600,
'expanded' : 700,
'extra-expanded' : 800,
'ultra-expanded' : 900}
weight_dict = {
'ultralight' : 100,
'light' : 200,
'normal' : 400,
'regular' : 400,
'book' : 400,
'medium' : 500,
'roman' : 500,
'semibold' : 600,
'demibold' : 600,
'demi' : 600,
'bold' : 700,
'heavy' : 800,
'extra bold' : 800,
'black' : 900}
font_family_aliases = set([
'serif',
'sans-serif',
'sans serif',
'cursive',
'fantasy',
'monospace',
'sans'])
# OS Font paths
MSFolders = \
r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
MSFontDirectories = [
r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']
X11FontDirectories = [
# an old standard installation point
"/usr/X11R6/lib/X11/fonts/TTF/",
"/usr/X11/lib/X11/fonts",
# here is the new standard location for fonts
"/usr/share/fonts/",
# documented as a good place to install new fonts
"/usr/local/share/fonts/",
# common application, not really useful
"/usr/lib/openoffice/share/fonts/truetype/",
]
OSXFontDirectories = [
"/Library/Fonts/",
"/Network/Library/Fonts/",
"/System/Library/Fonts/",
# fonts installed via MacPorts
"/opt/local/share/fonts"
""
]
if not USE_FONTCONFIG and sys.platform != 'win32':
home = os.environ.get('HOME')
if home is not None:
# user fonts on OSX
path = os.path.join(home, 'Library', 'Fonts')
OSXFontDirectories.append(path)
path = os.path.join(home, '.fonts')
X11FontDirectories.append(path)
def get_fontext_synonyms(fontext):
"""
    Return a list of file extensions that are synonyms for the given
    file extension *fontext*.
"""
return {'ttf': ('ttf', 'otf'),
'otf': ('ttf', 'otf'),
'afm': ('afm',)}[fontext]
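# Example (illustrative): get_fontext_synonyms('ttf') -> ('ttf', 'otf') and
# get_fontext_synonyms('afm') -> ('afm',); any other extension raises KeyError.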
def list_fonts(directory, extensions):
"""
Return a list of all fonts matching any of the extensions,
possibly upper-cased, found recursively under the directory.
"""
pattern = ';'.join(['*.%s;*.%s' % (ext, ext.upper())
for ext in extensions])
return cbook.listFiles(directory, pattern)
def win32FontDirectory():
"""
Return the user-specified font directory for Win32. This is
looked up from the registry key::
\\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts
If the key is not found, $WINDIR/Fonts will be returned.
"""
try:
from six.moves import winreg
except ImportError:
pass # Fall through to default
else:
try:
user = winreg.OpenKey(winreg.HKEY_CURRENT_USER, MSFolders)
try:
try:
return winreg.QueryValueEx(user, 'Fonts')[0]
except OSError:
pass # Fall through to default
finally:
winreg.CloseKey(user)
except OSError:
pass # Fall through to default
return os.path.join(os.environ['WINDIR'], 'Fonts')
def win32InstalledFonts(directory=None, fontext='ttf'):
"""
Search for fonts in the specified font directory, or use the
system directories if none given. A list of TrueType font
filenames are returned by default, or AFM fonts if *fontext* ==
'afm'.
"""
from six.moves import winreg
if directory is None:
directory = win32FontDirectory()
fontext = get_fontext_synonyms(fontext)
key, items = None, {}
for fontdir in MSFontDirectories:
try:
local = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, fontdir)
except OSError:
continue
if not local:
return list_fonts(directory, fontext)
try:
for j in range(winreg.QueryInfoKey(local)[1]):
try:
key, direc, any = winreg.EnumValue( local, j)
if not is_string_like(direc):
continue
if not os.path.dirname(direc):
direc = os.path.join(directory, direc)
direc = os.path.abspath(direc).lower()
if os.path.splitext(direc)[1][1:] in fontext:
items[direc] = 1
except EnvironmentError:
continue
except WindowsError:
continue
except MemoryError:
continue
return list(six.iterkeys(items))
finally:
winreg.CloseKey(local)
return None
def OSXInstalledFonts(directories=None, fontext='ttf'):
"""
Get list of font files on OS X - ignores font suffix by default.
"""
if directories is None:
directories = OSXFontDirectories
fontext = get_fontext_synonyms(fontext)
files = []
for path in directories:
if fontext is None:
files.extend(cbook.listFiles(path, '*'))
else:
files.extend(list_fonts(path, fontext))
return files
@lru_cache()
def _call_fc_list():
"""Cache and list the font filenames known to `fc-list`.
"""
# Delay the warning by 5s.
timer = Timer(5, lambda: warnings.warn(
'Matplotlib is building the font cache using fc-list. '
'This may take a moment.'))
timer.start()
try:
out = subprocess.check_output([str('fc-list'), '--format=%{file}'])
except (OSError, subprocess.CalledProcessError):
return []
finally:
timer.cancel()
fnames = []
for fname in out.split(b'\n'):
try:
fname = six.text_type(fname, sys.getfilesystemencoding())
except UnicodeDecodeError:
continue
fnames.append(fname)
return fnames
def get_fontconfig_fonts(fontext='ttf'):
"""List the font filenames known to `fc-list` having the given extension.
"""
fontext = get_fontext_synonyms(fontext)
return [fname for fname in _call_fc_list()
if os.path.splitext(fname)[1][1:] in fontext]
def findSystemFonts(fontpaths=None, fontext='ttf'):
"""
Search for fonts in the specified font paths. If no paths are
given, will use a standard set of system paths, as well as the
list of fonts tracked by fontconfig if fontconfig is installed and
available. A list of TrueType fonts are returned by default with
AFM fonts as an option.
"""
fontfiles = set()
fontexts = get_fontext_synonyms(fontext)
if fontpaths is None:
if sys.platform == 'win32':
fontdir = win32FontDirectory()
fontpaths = [fontdir]
# now get all installed fonts directly...
for f in win32InstalledFonts(fontdir):
base, ext = os.path.splitext(f)
if len(ext)>1 and ext[1:].lower() in fontexts:
fontfiles.add(f)
else:
fontpaths = X11FontDirectories
# check for OS X & load its fonts if present
if sys.platform == 'darwin':
for f in OSXInstalledFonts(fontext=fontext):
fontfiles.add(f)
for f in get_fontconfig_fonts(fontext):
fontfiles.add(f)
elif isinstance(fontpaths, six.string_types):
fontpaths = [fontpaths]
for path in fontpaths:
files = list_fonts(path, fontexts)
for fname in files:
fontfiles.add(os.path.abspath(fname))
return [fname for fname in fontfiles if os.path.exists(fname)]
def weight_as_number(weight):
"""
Return the weight property as a numeric value. String values
are converted to their corresponding numeric value.
"""
if isinstance(weight, six.string_types):
try:
weight = weight_dict[weight.lower()]
except KeyError:
weight = 400
elif weight in range(100, 1000, 100):
pass
else:
raise ValueError('weight not a valid integer')
return weight
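# Example (illustrative): weight_as_number('bold') -> 700 and
# weight_as_number('semibold') -> 600; an unrecognized string such as
# 'bolder' falls back to 400, while an integer that is not one of
# 100, 200, ..., 900 raises ValueError.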
class FontEntry(object):
"""
A class for storing Font properties. It is used when populating
the font lookup dictionary.
"""
def __init__(self,
fname ='',
name ='',
style ='normal',
variant='normal',
weight ='normal',
stretch='normal',
size ='medium',
):
self.fname = fname
self.name = name
self.style = style
self.variant = variant
self.weight = weight
self.stretch = stretch
try:
self.size = str(float(size))
except ValueError:
self.size = size
def __repr__(self):
return "<Font '%s' (%s) %s %s %s %s>" % (
self.name, os.path.basename(self.fname), self.style, self.variant,
self.weight, self.stretch)
def ttfFontProperty(font):
"""
    A function for populating a :class:`FontEntry` by extracting
information from the TrueType font file.
*font* is a :class:`FT2Font` instance.
"""
name = font.family_name
# Styles are: italic, oblique, and normal (default)
sfnt = font.get_sfnt()
sfnt2 = sfnt.get((1,0,0,2))
sfnt4 = sfnt.get((1,0,0,4))
if sfnt2:
sfnt2 = sfnt2.decode('macroman').lower()
else:
sfnt2 = ''
if sfnt4:
sfnt4 = sfnt4.decode('macroman').lower()
else:
sfnt4 = ''
if sfnt4.find('oblique') >= 0:
style = 'oblique'
elif sfnt4.find('italic') >= 0:
style = 'italic'
elif sfnt2.find('regular') >= 0:
style = 'normal'
elif font.style_flags & ft2font.ITALIC:
style = 'italic'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = None
for w in six.iterkeys(weight_dict):
if sfnt4.find(w) >= 0:
weight = w
break
if not weight:
if font.style_flags & ft2font.BOLD:
weight = 700
else:
weight = 400
weight = weight_as_number(weight)
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
if (sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or
sfnt4.find('cond') >= 0):
stretch = 'condensed'
elif sfnt4.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g., 12pt
# Percentage values are in 'em's. Most robust specification.
# !!!! Incomplete
if font.scalable:
size = 'scalable'
else:
size = str(float(font.get_fontsize()))
# !!!! Incomplete
size_adjust = None
return FontEntry(font.fname, name, style, variant, weight, stretch, size)
def afmFontProperty(fontpath, font):
"""
    A function for populating a :class:`FontEntry` instance by
    extracting information from the AFM font file.
    *font* is a :class:`AFM` instance.
"""
name = font.get_familyname()
fontname = font.get_fontname().lower()
# Styles are: italic, oblique, and normal (default)
if font.get_angle() != 0 or name.lower().find('italic') >= 0:
style = 'italic'
elif name.lower().find('oblique') >= 0:
style = 'oblique'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = weight_as_number(font.get_weight().lower())
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
if fontname.find('narrow') >= 0 or fontname.find('condensed') >= 0 or \
fontname.find('cond') >= 0:
stretch = 'condensed'
elif fontname.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif fontname.find('wide') >= 0 or fontname.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g., 12pt
# Percentage values are in 'em's. Most robust specification.
# All AFM fonts are apparently scalable.
size = 'scalable'
# !!!! Incomplete
size_adjust = None
return FontEntry(fontpath, name, style, variant, weight, stretch, size)
def createFontList(fontfiles, fontext='ttf'):
"""
A function to create a font lookup list. The default is to create
a list of TrueType fonts. An AFM font list can optionally be
created.
"""
fontlist = []
# Add fonts from list of known font files.
seen = {}
for fpath in fontfiles:
verbose.report('createFontDict: %s' % (fpath), 'debug')
fname = os.path.split(fpath)[1]
if fname in seen:
continue
else:
seen[fname] = 1
if fontext == 'afm':
try:
fh = open(fpath, 'rb')
except:
verbose.report("Could not open font file %s" % fpath)
continue
try:
try:
font = afm.AFM(fh)
finally:
fh.close()
except RuntimeError:
verbose.report("Could not parse font file %s" % fpath)
continue
try:
prop = afmFontProperty(fpath, font)
except KeyError:
continue
else:
try:
font = ft2font.FT2Font(fpath)
except RuntimeError:
verbose.report("Could not open font file %s" % fpath)
continue
except UnicodeError:
verbose.report("Cannot handle unicode filenames")
# print >> sys.stderr, 'Bad file is', fpath
continue
except IOError:
verbose.report("IO error - cannot open font file %s" % fpath)
continue
try:
prop = ttfFontProperty(font)
except (KeyError, RuntimeError, ValueError):
continue
fontlist.append(prop)
return fontlist
class FontProperties(object):
"""
A class for storing and manipulating font properties.
The font properties are those described in the `W3C Cascading
Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font
specification. The six properties are:
- family: A list of font names in decreasing order of priority.
The items may include a generic font family name, either
'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'.
In that case, the actual font to be used will be looked up
from the associated rcParam in :file:`matplotlibrc`.
- style: Either 'normal', 'italic' or 'oblique'.
- variant: Either 'normal' or 'small-caps'.
- stretch: A numeric value in the range 0-1000 or one of
'ultra-condensed', 'extra-condensed', 'condensed',
'semi-condensed', 'normal', 'semi-expanded', 'expanded',
'extra-expanded' or 'ultra-expanded'
- weight: A numeric value in the range 0-1000 or one of
'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black'
      - size: Either a relative value of 'xx-small', 'x-small',
'small', 'medium', 'large', 'x-large', 'xx-large' or an
absolute font size, e.g., 12
The default font property for TrueType fonts (as specified in the
default :file:`matplotlibrc` file) is::
sans-serif, normal, normal, normal, normal, scalable.
Alternatively, a font may be specified using an absolute path to a
.ttf file, by using the *fname* kwarg.
The preferred usage of font sizes is to use the relative values,
e.g., 'large', instead of absolute font sizes, e.g., 12. This
approach allows all text sizes to be made larger or smaller based
on the font manager's default font size.
This class will also accept a `fontconfig
<https://www.freedesktop.org/wiki/Software/fontconfig/>`_ pattern, if it is
the only argument provided. See the documentation on `fontconfig patterns
<https://www.freedesktop.org/software/fontconfig/fontconfig-user.html>`_.
This support does not require fontconfig to be installed. We are merely
borrowing its pattern syntax for use here.
Note that matplotlib's internal font manager and fontconfig use a
different algorithm to lookup fonts, so the results of the same pattern
may be different in matplotlib than in other applications that use
fontconfig.
"""
def __init__(self,
family = None,
style = None,
variant= None,
weight = None,
stretch= None,
size = None,
fname = None, # if this is set, it's a hardcoded filename to use
_init = None # used only by copy()
):
self._family = _normalize_font_family(rcParams['font.family'])
self._slant = rcParams['font.style']
self._variant = rcParams['font.variant']
self._weight = rcParams['font.weight']
self._stretch = rcParams['font.stretch']
self._size = rcParams['font.size']
self._file = None
# This is used only by copy()
if _init is not None:
self.__dict__.update(_init.__dict__)
return
if is_string_like(family):
# Treat family as a fontconfig pattern if it is the only
# parameter provided.
if (style is None and
variant is None and
weight is None and
stretch is None and
size is None and
fname is None):
self.set_fontconfig_pattern(family)
return
self.set_family(family)
self.set_style(style)
self.set_variant(variant)
self.set_weight(weight)
self.set_stretch(stretch)
self.set_file(fname)
self.set_size(size)
def _parse_fontconfig_pattern(self, pattern):
return parse_fontconfig_pattern(pattern)
def __hash__(self):
l = (tuple(self.get_family()),
self.get_slant(),
self.get_variant(),
self.get_weight(),
self.get_stretch(),
self.get_size_in_points(),
self.get_file())
return hash(l)
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return hash(self) != hash(other)
def __str__(self):
return self.get_fontconfig_pattern()
def get_family(self):
"""
Return a list of font names that comprise the font family.
"""
return self._family
def get_name(self):
"""
Return the name of the font that best matches the font
properties.
"""
return get_font(findfont(self)).family_name
def get_style(self):
"""
Return the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
return self._slant
get_slant = get_style
def get_variant(self):
"""
Return the font variant. Values are: 'normal' or
'small-caps'.
"""
return self._variant
def get_weight(self):
"""
        Return the font weight. Options are: a numeric value in the
range 0-1000 or one of 'light', 'normal', 'regular', 'book',
'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
'heavy', 'extra bold', 'black'
"""
return self._weight
def get_stretch(self):
"""
Return the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
"""
return self._stretch
def get_size(self):
"""
Return the font size.
"""
return self._size
def get_size_in_points(self):
return self._size
def get_file(self):
"""
Return the filename of the associated font.
"""
return self._file
def get_fontconfig_pattern(self):
"""
Get a fontconfig pattern suitable for looking up the font as
specified with fontconfig's ``fc-match`` utility.
See the documentation on `fontconfig patterns
<https://www.freedesktop.org/software/fontconfig/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
return generate_fontconfig_pattern(self)
def set_family(self, family):
"""
Change the font family. May be either an alias (generic name
        in CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
'fantasy', or 'monospace', a real font name or a list of real
font names. Real font names are not supported when
`text.usetex` is `True`.
"""
if family is None:
family = rcParams['font.family']
self._family = _normalize_font_family(family)
set_name = set_family
def set_style(self, style):
"""
Set the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if style is None:
style = rcParams['font.style']
if style not in ('normal', 'italic', 'oblique'):
raise ValueError("style must be normal, italic or oblique")
self._slant = style
set_slant = set_style
def set_variant(self, variant):
"""
Set the font variant. Values are: 'normal' or 'small-caps'.
"""
if variant is None:
variant = rcParams['font.variant']
if variant not in ('normal', 'small-caps'):
raise ValueError("variant must be normal or small-caps")
self._variant = variant
def set_weight(self, weight):
"""
Set the font weight. May be either a numeric value in the
range 0-1000 or one of 'ultralight', 'light', 'normal',
'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
'demi', 'bold', 'heavy', 'extra bold', 'black'
"""
if weight is None:
weight = rcParams['font.weight']
try:
weight = int(weight)
if weight < 0 or weight > 1000:
raise ValueError()
except ValueError:
if weight not in weight_dict:
raise ValueError("weight is invalid")
weight = weight_dict[weight]
self._weight = weight
def set_stretch(self, stretch):
"""
Set the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded' or
'ultra-expanded', or a numeric value in the range 0-1000.
"""
if stretch is None:
stretch = rcParams['font.stretch']
try:
stretch = int(stretch)
if stretch < 0 or stretch > 1000:
raise ValueError()
except ValueError:
if stretch not in stretch_dict:
raise ValueError("stretch is invalid")
self._stretch = stretch
def set_size(self, size):
"""
        Set the font size. Either a relative value of 'xx-small',
'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
or an absolute font size, e.g., 12.
"""
if size is None:
size = rcParams['font.size']
try:
size = float(size)
except ValueError:
try:
scale = font_scalings[size]
except KeyError:
raise ValueError(
"Size is invalid. Valid font size are " + ", ".join(
str(i) for i in font_scalings.keys()))
else:
size = scale * FontManager.get_default_size()
self._size = size
def set_file(self, file):
"""
Set the filename of the fontfile to use. In this case, all
other properties will be ignored.
"""
self._file = file
def set_fontconfig_pattern(self, pattern):
"""
Set the properties by parsing a fontconfig *pattern*.
See the documentation on `fontconfig patterns
<https://www.freedesktop.org/software/fontconfig/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
for key, val in six.iteritems(self._parse_fontconfig_pattern(pattern)):
if type(val) == list:
getattr(self, "set_" + key)(val[0])
else:
getattr(self, "set_" + key)(val)
def copy(self):
"""Return a deep copy of self"""
return FontProperties(_init=self)
def ttfdict_to_fnames(d):
"""
flatten a ttfdict to all the filenames it contains
"""
fnames = []
for named in six.itervalues(d):
for styled in six.itervalues(named):
for variantd in six.itervalues(styled):
for weightd in six.itervalues(variantd):
for stretchd in six.itervalues(weightd):
for fname in six.itervalues(stretchd):
fnames.append(fname)
return fnames
def pickle_dump(data, filename):
"""
    Equivalent to pickle.dump(data, open(filename, 'wb'))
but closes the file to prevent filehandle leakage.
"""
with open(filename, 'wb') as fh:
pickle.dump(data, fh)
def pickle_load(filename):
"""
    Equivalent to pickle.load(open(filename, 'rb'))
but closes the file to prevent filehandle leakage.
"""
with open(filename, 'rb') as fh:
data = pickle.load(fh)
return data
def _normalize_font_family(family):
if is_string_like(family):
family = [six.text_type(family)]
elif isinstance(family, Iterable):
family = [six.text_type(f) for f in family]
return family
class TempCache(object):
"""
A class to store temporary caches that are (a) not saved to disk
and (b) invalidated whenever certain font-related
rcParams---namely the family lookup lists---are changed or the
font cache is reloaded. This avoids the expensive linear search
through all fonts every time a font is looked up.
"""
    # A list of rcParam names that, when changed, invalidate this
    # cache.
invalidating_rcparams = (
'font.serif', 'font.sans-serif', 'font.cursive', 'font.fantasy',
'font.monospace')
def __init__(self):
self._lookup_cache = {}
self._last_rcParams = self.make_rcparams_key()
def make_rcparams_key(self):
return [id(fontManager)] + [
rcParams[param] for param in self.invalidating_rcparams]
def get(self, prop):
key = self.make_rcparams_key()
if key != self._last_rcParams:
self._lookup_cache = {}
self._last_rcParams = key
return self._lookup_cache.get(prop)
def set(self, prop, value):
key = self.make_rcparams_key()
if key != self._last_rcParams:
self._lookup_cache = {}
self._last_rcParams = key
self._lookup_cache[prop] = value
class FontManager(object):
"""
On import, the :class:`FontManager` singleton instance creates a
list of TrueType fonts based on the font properties: name, style,
variant, weight, stretch, and size. The :meth:`findfont` method
does a nearest neighbor search to find the font that most closely
matches the specification. If no good enough match is found, a
default font is returned.
"""
# Increment this version number whenever the font cache data
    # format or behavior has changed and requires existing font
# cache files to be rebuilt.
__version__ = 200
def __init__(self, size=None, weight='normal'):
self._version = self.__version__
self.__default_weight = weight
self.default_size = size
paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'),
os.path.join(rcParams['datapath'], 'fonts', 'afm'),
os.path.join(rcParams['datapath'], 'fonts', 'pdfcorefonts')]
# Create list of font paths
for pathname in ['TTFPATH', 'AFMPATH']:
if pathname in os.environ:
ttfpath = os.environ[pathname]
if ttfpath.find(';') >= 0: #win32 style
paths.extend(ttfpath.split(';'))
elif ttfpath.find(':') >= 0: # unix style
paths.extend(ttfpath.split(':'))
else:
paths.append(ttfpath)
verbose.report('font search path %s'%(str(paths)))
# Load TrueType fonts and create font dictionary.
self.ttffiles = findSystemFonts(paths) + findSystemFonts()
self.defaultFamily = {
'ttf': 'DejaVu Sans',
'afm': 'Helvetica'}
self.defaultFont = {}
for fname in self.ttffiles:
verbose.report('trying fontname %s' % fname, 'debug')
            if fname.lower().find('dejavusans.ttf') >= 0:
self.defaultFont['ttf'] = fname
break
else:
# use anything
self.defaultFont['ttf'] = self.ttffiles[0]
self.ttflist = createFontList(self.ttffiles)
self.afmfiles = findSystemFonts(paths, fontext='afm') + \
findSystemFonts(fontext='afm')
self.afmlist = createFontList(self.afmfiles, fontext='afm')
if len(self.afmfiles):
self.defaultFont['afm'] = self.afmfiles[0]
else:
self.defaultFont['afm'] = None
def get_default_weight(self):
"""
Return the default font weight.
"""
return self.__default_weight
@staticmethod
def get_default_size():
"""
Return the default font size.
"""
return rcParams['font.size']
def set_default_weight(self, weight):
"""
Set the default font weight. The initial value is 'normal'.
"""
self.__default_weight = weight
def update_fonts(self, filenames):
"""
Update the font dictionary with new font files.
Currently not implemented.
"""
# !!!! Needs implementing
raise NotImplementedError
# Each of the scoring functions below should return a value between
# 0.0 (perfect match) and 1.0 (terrible match)
def score_family(self, families, family2):
"""
Returns a match score between the list of font families in
*families* and the font family name *family2*.
An exact match at the head of the list returns 0.0.
A match further down the list will return between 0 and 1.
No match will return 1.0.
"""
if not isinstance(families, (list, tuple)):
families = [families]
elif len(families) == 0:
return 1.0
family2 = family2.lower()
step = 1 / len(families)
for i, family1 in enumerate(families):
family1 = family1.lower()
if family1 in font_family_aliases:
if family1 in ('sans', 'sans serif'):
family1 = 'sans-serif'
options = rcParams['font.' + family1]
options = [x.lower() for x in options]
if family2 in options:
idx = options.index(family2)
return (i + (idx / len(options))) * step
elif family1 == family2:
# The score should be weighted by where in the
# list the font was found.
return i * step
return 1.0
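    # Worked example for score_family (assuming rcParams['font.serif'] lists
    # 'DejaVu Serif' first): score_family(['serif'], 'DejaVu Serif') expands
    # the alias to the rcParam list, finds the family at index 0 and returns
    # 0.0; a family that appears nowhere in the list scores 1.0.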
def score_style(self, style1, style2):
"""
Returns a match score between *style1* and *style2*.
An exact match returns 0.0.
A match between 'italic' and 'oblique' returns 0.1.
No match returns 1.0.
"""
if style1 == style2:
return 0.0
elif style1 in ('italic', 'oblique') and \
style2 in ('italic', 'oblique'):
return 0.1
return 1.0
def score_variant(self, variant1, variant2):
"""
Returns a match score between *variant1* and *variant2*.
An exact match returns 0.0, otherwise 1.0.
"""
if variant1 == variant2:
return 0.0
else:
return 1.0
def score_stretch(self, stretch1, stretch2):
"""
Returns a match score between *stretch1* and *stretch2*.
The result is the absolute value of the difference between the
CSS numeric values of *stretch1* and *stretch2*, normalized
between 0.0 and 1.0.
"""
try:
stretchval1 = int(stretch1)
except ValueError:
stretchval1 = stretch_dict.get(stretch1, 500)
try:
stretchval2 = int(stretch2)
except ValueError:
stretchval2 = stretch_dict.get(stretch2, 500)
return abs(stretchval1 - stretchval2) / 1000.0
def score_weight(self, weight1, weight2):
"""
Returns a match score between *weight1* and *weight2*.
The result is the absolute value of the difference between the
CSS numeric values of *weight1* and *weight2*, normalized
between 0.0 and 1.0.
"""
try:
weightval1 = int(weight1)
except ValueError:
weightval1 = weight_dict.get(weight1, 500)
try:
weightval2 = int(weight2)
except ValueError:
weightval2 = weight_dict.get(weight2, 500)
return abs(weightval1 - weightval2) / 1000.0
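    # Worked example: score_weight('normal', 'bold') is |400 - 700| / 1000
    # = 0.3 and score_weight(400, 400) is 0.0; names missing from
    # weight_dict fall back to 500 before the difference is taken.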
def score_size(self, size1, size2):
"""
Returns a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
"""
if size2 == 'scalable':
return 0.0
        # Size values should have already been converted to points.
try:
sizeval1 = float(size1)
except ValueError:
            sizeval1 = self.default_size * font_scalings[size1]
try:
sizeval2 = float(size2)
except ValueError:
return 1.0
return abs(sizeval1 - sizeval2) / 72.0
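    # Worked example: score_size(12.0, 'scalable') is 0.0 because any size
    # can be generated, while score_size(12.0, '24.0') is |12 - 24| / 72,
    # i.e. about 0.167.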
def findfont(self, prop, fontext='ttf', directory=None,
fallback_to_default=True, rebuild_if_missing=True):
"""
Search the font list for the font that most closely matches
the :class:`FontProperties` *prop*.
:meth:`findfont` performs a nearest neighbor search. Each
font is given a similarity score to the target font
properties. The first font with the highest score is
returned. If no matches below a certain threshold are found,
the default font (usually DejaVu Sans) is returned.
        If `directory` is given, only fonts from that directory
        (or a subdirectory of it) will be returned.
The result is cached, so subsequent lookups don't have to
perform the O(n) nearest neighbor search.
If `fallback_to_default` is True, will fallback to the default
font family (usually "DejaVu Sans" or "Helvetica") if
the first lookup hard-fails.
See the `W3C Cascading Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
for a description of the font finding algorithm.
"""
if not isinstance(prop, FontProperties):
prop = FontProperties(prop)
fname = prop.get_file()
if fname is not None:
verbose.report('findfont returning %s'%fname, 'debug')
return fname
if fontext == 'afm':
fontlist = self.afmlist
else:
fontlist = self.ttflist
if directory is None:
cached = _lookup_cache[fontext].get(prop)
if cached is not None:
return cached
best_score = 1e64
best_font = None
for font in fontlist:
if (directory is not None and
os.path.commonprefix([font.fname, directory]) != directory):
continue
# Matching family should have highest priority, so it is multiplied
# by 10.0
score = \
self.score_family(prop.get_family(), font.name) * 10.0 + \
self.score_style(prop.get_style(), font.style) + \
self.score_variant(prop.get_variant(), font.variant) + \
self.score_weight(prop.get_weight(), font.weight) + \
self.score_stretch(prop.get_stretch(), font.stretch) + \
self.score_size(prop.get_size(), font.size)
if score < best_score:
best_score = score
best_font = font
if score == 0:
break
if best_font is None or best_score >= 10.0:
if fallback_to_default:
warnings.warn(
'findfont: Font family %s not found. Falling back to %s' %
(prop.get_family(), self.defaultFamily[fontext]))
default_prop = prop.copy()
default_prop.set_family(self.defaultFamily[fontext])
return self.findfont(default_prop, fontext, directory, False)
else:
# This is a hard fail -- we can't find anything reasonable,
                # so just return the DejaVuSans.ttf
warnings.warn(
'findfont: Could not match %s. Returning %s' %
(prop, self.defaultFont[fontext]),
UserWarning)
result = self.defaultFont[fontext]
else:
verbose.report(
'findfont: Matching %s to %s (%s) with score of %f' %
(prop, best_font.name, repr(best_font.fname), best_score))
result = best_font.fname
if not os.path.isfile(result):
if rebuild_if_missing:
verbose.report(
'findfont: Found a missing font file. Rebuilding cache.')
_rebuild()
return fontManager.findfont(
prop, fontext, directory, True, False)
else:
raise ValueError("No valid font could be found")
if directory is None:
_lookup_cache[fontext].set(prop, result)
return result
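# Illustrative call sequence for FontManager.findfont (assumed usage; the
# property values are made up):
#
#     prop = FontProperties(family='monospace', weight='bold')
#     ttf_path = fontManager.findfont(prop)                  # cached lookup
#     afm_path = fontManager.findfont(prop, fontext='afm')   # AFM instead
#
# When no family matches well enough, the search falls back to the default
# family before returning the default font file itself.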
_is_opentype_cff_font_cache = {}
def is_opentype_cff_font(filename):
"""
Returns True if the given font is a Postscript Compact Font Format
Font embedded in an OpenType wrapper. Used by the PostScript and
PDF backends that can not subset these fonts.
"""
if os.path.splitext(filename)[1].lower() == '.otf':
result = _is_opentype_cff_font_cache.get(filename)
if result is None:
with open(filename, 'rb') as fd:
tag = fd.read(4)
result = (tag == b'OTTO')
_is_opentype_cff_font_cache[filename] = result
return result
return False
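# Example (illustrative; the path is hypothetical):
# is_opentype_cff_font('/path/to/SomeFont.otf') returns True only when the
# file's first four bytes are the b'OTTO' sfnt tag; filenames without a
# '.otf' extension return False without the file being opened.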
fontManager = None
_fmcache = None
get_font = lru_cache(64)(ft2font.FT2Font)
# The experimental fontconfig-based backend.
if USE_FONTCONFIG and sys.platform != 'win32':
import re
def fc_match(pattern, fontext):
fontexts = get_fontext_synonyms(fontext)
ext = "." + fontext
try:
pipe = subprocess.Popen(
['fc-match', '-s', '--format=%{file}\\n', pattern],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output = pipe.communicate()[0]
except (OSError, IOError):
return None
        # The bulk of the output from fc-match is ascii, so we keep the
# result in bytes and parse it as bytes, until we extract the
# filename, which is in sys.filesystemencoding().
if pipe.returncode == 0:
for fname in output.split(b'\n'):
try:
fname = six.text_type(fname, sys.getfilesystemencoding())
except UnicodeDecodeError:
continue
if os.path.splitext(fname)[1][1:] in fontexts:
return fname
return None
_fc_match_cache = {}
def findfont(prop, fontext='ttf'):
if not is_string_like(prop):
prop = prop.get_fontconfig_pattern()
cached = _fc_match_cache.get(prop)
if cached is not None:
return cached
result = fc_match(prop, fontext)
if result is None:
result = fc_match(':', fontext)
_fc_match_cache[prop] = result
return result
else:
_fmcache = None
cachedir = get_cachedir()
if cachedir is not None:
if six.PY3:
_fmcache = os.path.join(cachedir, 'fontList.py3k.cache')
else:
_fmcache = os.path.join(cachedir, 'fontList.cache')
fontManager = None
_lookup_cache = {
'ttf': TempCache(),
'afm': TempCache()
}
def _rebuild():
global fontManager
fontManager = FontManager()
if _fmcache:
with cbook.Locked(cachedir):
pickle_dump(fontManager, _fmcache)
verbose.report("generated new fontManager")
if _fmcache:
try:
fontManager = pickle_load(_fmcache)
if (not hasattr(fontManager, '_version') or
fontManager._version != FontManager.__version__):
_rebuild()
else:
fontManager.default_size = None
verbose.report("Using fontManager instance from %s" % _fmcache)
except cbook.Locked.TimeoutError:
raise
except:
_rebuild()
else:
_rebuild()
def findfont(prop, **kw):
global fontManager
font = fontManager.findfont(prop, **kw)
return font
| apache-2.0 |
lbishal/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed-up of LSHForest queries relative to the brute-force
exact nearest neighbors method is measured for the same settings.
In general, the speed-up increases as the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
liangz0707/scikit-learn | sklearn/neighbors/classification.py | 132 | 14388 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
       but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
        Label assigned to outlier samples (samples with no neighbors
        within the given radius).
        If set to None, a ValueError is raised when an outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
shenzebang/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/frame/test_join.py | 11 | 5226 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas import DataFrame, Index, PeriodIndex
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
@pytest.fixture
def frame_with_period_index():
return DataFrame(
data=np.arange(20).reshape(4, 5),
columns=list('abcde'),
index=PeriodIndex(start='2000', freq='A', periods=4))
@pytest.fixture
def frame():
return TestData().frame
@pytest.fixture
def left():
return DataFrame({'a': [20, 10, 0]}, index=[2, 1, 0])
@pytest.fixture
def right():
return DataFrame({'b': [300, 100, 200]}, index=[3, 1, 2])
@pytest.mark.parametrize(
"how, sort, expected",
[('inner', False, DataFrame({'a': [20, 10],
'b': [200, 100]},
index=[2, 1])),
('inner', True, DataFrame({'a': [10, 20],
'b': [100, 200]},
index=[1, 2])),
('left', False, DataFrame({'a': [20, 10, 0],
'b': [200, 100, np.nan]},
index=[2, 1, 0])),
('left', True, DataFrame({'a': [0, 10, 20],
'b': [np.nan, 100, 200]},
index=[0, 1, 2])),
('right', False, DataFrame({'a': [np.nan, 10, 20],
'b': [300, 100, 200]},
index=[3, 1, 2])),
('right', True, DataFrame({'a': [10, 20, np.nan],
'b': [100, 200, 300]},
index=[1, 2, 3])),
('outer', False, DataFrame({'a': [0, 10, 20, np.nan],
'b': [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3])),
('outer', True, DataFrame({'a': [0, 10, 20, np.nan],
'b': [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3]))])
def test_join(left, right, how, sort, expected):
result = left.join(right, how=how, sort=sort)
tm.assert_frame_equal(result, expected)
def test_join_index(frame):
# left / right
f = frame.loc[frame.index[:10], ['A', 'B']]
f2 = frame.loc[frame.index[5:], ['C', 'D']].iloc[::-1]
joined = f.join(f2)
tm.assert_index_equal(f.index, joined.index)
expected_columns = Index(['A', 'B', 'C', 'D'])
tm.assert_index_equal(joined.columns, expected_columns)
joined = f.join(f2, how='left')
tm.assert_index_equal(joined.index, f.index)
tm.assert_index_equal(joined.columns, expected_columns)
joined = f.join(f2, how='right')
tm.assert_index_equal(joined.index, f2.index)
tm.assert_index_equal(joined.columns, expected_columns)
# inner
joined = f.join(f2, how='inner')
tm.assert_index_equal(joined.index, f.index[5:10])
tm.assert_index_equal(joined.columns, expected_columns)
# outer
joined = f.join(f2, how='outer')
tm.assert_index_equal(joined.index, frame.index.sort_values())
tm.assert_index_equal(joined.columns, expected_columns)
tm.assert_raises_regex(
ValueError, 'join method', f.join, f2, how='foo')
# corner case - overlapping columns
for how in ('outer', 'left', 'inner'):
with tm.assert_raises_regex(ValueError, 'columns overlap but '
'no suffix'):
frame.join(frame, how=how)
def test_join_index_more(frame):
af = frame.loc[:, ['A', 'B']]
bf = frame.loc[::2, ['C', 'D']]
expected = af.copy()
expected['C'] = frame['C'][::2]
expected['D'] = frame['D'][::2]
result = af.join(bf)
tm.assert_frame_equal(result, expected)
result = af.join(bf, how='right')
tm.assert_frame_equal(result, expected[::2])
result = bf.join(af, how='right')
tm.assert_frame_equal(result, expected.loc[:, result.columns])
def test_join_index_series(frame):
df = frame.copy()
s = df.pop(frame.columns[-1])
joined = df.join(s)
# TODO should this check_names ?
tm.assert_frame_equal(joined, frame, check_names=False)
s.name = None
tm.assert_raises_regex(ValueError, 'must have a name', df.join, s)
def test_join_overlap(frame):
df1 = frame.loc[:, ['A', 'B', 'C']]
df2 = frame.loc[:, ['B', 'C', 'D']]
joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
df1_suf = df1.loc[:, ['B', 'C']].add_suffix('_df1')
df2_suf = df2.loc[:, ['B', 'C']].add_suffix('_df2')
no_overlap = frame.loc[:, ['A', 'D']]
expected = df1_suf.join(df2_suf).join(no_overlap)
# column order not necessarily sorted
tm.assert_frame_equal(joined, expected.loc[:, joined.columns])
def test_join_period_index(frame_with_period_index):
other = frame_with_period_index.rename(
columns=lambda x: '{key}{key}'.format(key=x))
joined_values = np.concatenate(
[frame_with_period_index.values] * 2, axis=1)
joined_cols = frame_with_period_index.columns.append(other.columns)
joined = frame_with_period_index.join(other)
expected = DataFrame(
data=joined_values,
columns=joined_cols,
index=frame_with_period_index.index)
tm.assert_frame_equal(joined, expected)
| bsd-3-clause |
bert9bert/statsmodels | statsmodels/tsa/regime_switching/markov_switching.py | 2 | 88234 | """
Markov switching models
Author: Chad Fulton
License: BSD-3
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import pandas as pd
from statsmodels.compat.collections import OrderedDict
from scipy.misc import logsumexp
from statsmodels.base.data import PandasData
import statsmodels.tsa.base.tsa_model as tsbase
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tools.tools import Bunch
from statsmodels.tools.numdiff import approx_fprime_cs, approx_hess_cs
from statsmodels.tools.decorators import cache_readonly, resettable_cache
from statsmodels.tools.eval_measures import aic, bic, hqic
from statsmodels.tools.tools import pinv_extended
from statsmodels.tools.sm_exceptions import EstimationWarning
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.statespace.tools import find_best_blas_type
from statsmodels.tsa.regime_switching._hamilton_filter import (
shamilton_filter, dhamilton_filter, chamilton_filter, zhamilton_filter)
from statsmodels.tsa.regime_switching._kim_smoother import (
skim_smoother, dkim_smoother, ckim_smoother, zkim_smoother)
prefix_hamilton_filter_map = {
's': shamilton_filter, 'd': dhamilton_filter,
'c': chamilton_filter, 'z': zhamilton_filter
}
prefix_kim_smoother_map = {
's': skim_smoother, 'd': dkim_smoother,
'c': ckim_smoother, 'z': zkim_smoother
}
def _prepare_exog(exog):
k_exog = 0
if exog is not None:
exog_is_using_pandas = _is_using_pandas(exog, None)
if not exog_is_using_pandas:
exog = np.asarray(exog)
# Make sure we have 2-dimensional array
if exog.ndim == 1:
if not exog_is_using_pandas:
exog = exog[:, None]
else:
exog = pd.DataFrame(exog)
k_exog = exog.shape[1]
return k_exog, exog
def _logistic(x):
"""
Note that this is not a vectorized function
"""
x = np.array(x)
# np.exp(x) / (1 + np.exp(x))
if x.ndim == 0:
y = np.reshape(x, (1, 1, 1))
# np.exp(x[i]) / (1 + np.sum(np.exp(x[:])))
elif x.ndim == 1:
y = np.reshape(x, (len(x), 1, 1))
# np.exp(x[i,t]) / (1 + np.sum(np.exp(x[:,t])))
elif x.ndim == 2:
y = np.reshape(x, (x.shape[0], 1, x.shape[1]))
# np.exp(x[i,j,t]) / (1 + np.sum(np.exp(x[:,j,t])))
elif x.ndim == 3:
y = x
else:
raise NotImplementedError
tmp = np.c_[np.zeros((y.shape[-1], y.shape[1], 1)), y.T].T
evaluated = np.reshape(np.exp(y - logsumexp(tmp, axis=0)), x.shape)
return evaluated
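# Illustrative note (editorial addition): for a 1-dimensional input this is a
# logistic transform with an implicit reference category, returning
# exp(x[i]) / (1 + sum_j exp(x[j])) for each element. For example,
# _logistic([0., 0.]) is approximately [1/3, 1/3].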
def _partials_logistic(x):
"""
Note that this is not a vectorized function
"""
tmp = _logistic(x)
# k
if tmp.ndim == 0:
return tmp - tmp**2
# k x k
elif tmp.ndim == 1:
partials = np.diag(tmp - tmp**2)
# k x k x t
elif tmp.ndim == 2:
partials = [np.diag(tmp[:, t] - tmp[:, t]**2)
for t in range(tmp.shape[1])]
shape = tmp.shape[1], tmp.shape[0], tmp.shape[0]
partials = np.concatenate(partials).reshape(shape).transpose((1, 2, 0))
# k x k x j x t
else:
partials = [[np.diag(tmp[:, j, t] - tmp[:, j, t]**2)
for t in range(tmp.shape[2])]
for j in range(tmp.shape[1])]
shape = tmp.shape[1], tmp.shape[2], tmp.shape[0], tmp.shape[0]
partials = np.concatenate(partials).reshape(shape).transpose(
(2, 3, 0, 1))
for i in range(tmp.shape[0]):
for j in range(i):
partials[i, j, ...] = -tmp[i, ...] * tmp[j, ...]
partials[j, i, ...] = partials[i, j, ...]
return partials
def py_hamilton_filter(initial_probabilities, regime_transition,
conditional_likelihoods):
"""
Hamilton filter using pure Python
Parameters
----------
initial_probabilities : array
Array of initial probabilities, shaped (k_regimes,).
regime_transition : array
Matrix of regime transition probabilities, shaped either
(k_regimes, k_regimes, 1) or if there are time-varying transition
probabilities (k_regimes, k_regimes, nobs).
conditional_likelihoods : array
Array of likelihoods conditional on the last `order+1` regimes,
shaped (k_regimes,)*(order + 1) + (nobs,).
Returns
-------
filtered_marginal_probabilities : array
Array containing Pr[S_t=s_t | Y_t] - the probability of being in each
regime conditional on time t information. Shaped (k_regimes, nobs).
predicted_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t-1}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t-1
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
joint_likelihoods : array
Array of likelihoods conditional on time t information, shaped (nobs,).
filtered_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
"""
# Dimensions
k_regimes = len(initial_probabilities)
nobs = conditional_likelihoods.shape[-1]
order = conditional_likelihoods.ndim - 2
dtype = conditional_likelihoods.dtype
# Storage
# Pr[S_t = s_t | Y_t]
filtered_marginal_probabilities = (
np.zeros((k_regimes, nobs), dtype=dtype))
# Pr[S_t = s_t, ... S_{t-r} = s_{t-r} | Y_{t-1}]
predicted_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs,), dtype=dtype)
# f(y_t | Y_{t-1})
joint_likelihoods = np.zeros((nobs,), dtype)
# Pr[S_t = s_t, ... S_{t-r} = s_{t-r} | Y_t]
filtered_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs + 1,), dtype=dtype)
# Initial probabilities
filtered_marginal_probabilities[:, 0] = initial_probabilities
tmp = np.copy(initial_probabilities)
shape = (k_regimes, k_regimes)
for i in range(order):
tmp = np.reshape(regime_transition[..., i], shape + (1,) * i) * tmp
filtered_joint_probabilities[..., 0] = tmp
# Reshape regime_transition so we can use broadcasting
shape = (k_regimes, k_regimes)
shape += (1,) * (order-1)
shape += (regime_transition.shape[-1],)
regime_transition = np.reshape(regime_transition, shape)
# Get appropriate subset of transition matrix
if regime_transition.shape[-1] > 1:
regime_transition = regime_transition[..., order:]
# Hamilton filter iterations
transition_t = 0
for t in range(nobs):
if regime_transition.shape[-1] > 1:
transition_t = t
# S_t, S_{t-1}, ..., S_{t-r} | t-1, stored at zero-indexed location t
predicted_joint_probabilities[..., t] = (
# S_t | S_{t-1}
regime_transition[..., transition_t] *
# S_{t-1}, S_{t-2}, ..., S_{t-r} | t-1
filtered_joint_probabilities[..., t].sum(axis=-1))
# f(y_t, S_t, ..., S_{t-r} | t-1)
tmp = (conditional_likelihoods[..., t] *
predicted_joint_probabilities[..., t])
# f(y_t | t-1)
joint_likelihoods[t] = np.sum(tmp)
# S_t, S_{t-1}, ..., S_{t-r} | t, stored at index t+1
filtered_joint_probabilities[..., t+1] = (
tmp / joint_likelihoods[t])
# S_t | t
filtered_marginal_probabilities = filtered_joint_probabilities[..., 1:]
for i in range(1, filtered_marginal_probabilities.ndim - 1):
filtered_marginal_probabilities = np.sum(
filtered_marginal_probabilities, axis=-2)
return (filtered_marginal_probabilities, predicted_joint_probabilities,
joint_likelihoods, filtered_joint_probabilities[..., 1:])
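# Illustrative sketch (editorial addition, not part of the original module):
# a minimal call of ``py_hamilton_filter`` on a toy two-regime, order-1
# problem, mainly to document the expected array shapes. All names below are
# hypothetical.
def _example_py_hamilton_filter():
    k_regimes, order, nobs = 2, 1, 5
    initial = np.array([0.5, 0.5])
    # Constant left-stochastic transition matrix, shape (k_regimes, k_regimes, 1)
    regime_transition = np.array([[0.9, 0.2],
                                  [0.1, 0.8]])[..., None]
    # Conditional likelihoods, shape (k_regimes,) * (order + 1) + (nobs,)
    conditional_likelihoods = np.ones((k_regimes,) * (order + 1) + (nobs,))
    out = py_hamilton_filter(initial, regime_transition,
                             conditional_likelihoods)
    filtered_marginal, predicted_joint, joint_lik, filtered_joint = out
    assert filtered_marginal.shape == (k_regimes, nobs)
    assert joint_lik.shape == (nobs,)
    return out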
def cy_hamilton_filter(initial_probabilities, regime_transition,
conditional_likelihoods):
"""
Hamilton filter using Cython inner loop
Parameters
----------
initial_probabilities : array
Array of initial probabilities, shaped (k_regimes,).
regime_transition : array
Matrix of regime transition probabilities, shaped either
(k_regimes, k_regimes, 1) or if there are time-varying transition
probabilities (k_regimes, k_regimes, nobs).
conditional_likelihoods : array
Array of likelihoods conditional on the last `order+1` regimes,
shaped (k_regimes,)*(order + 1) + (nobs,).
Returns
-------
filtered_marginal_probabilities : array
Array containing Pr[S_t=s_t | Y_t] - the probability of being in each
regime conditional on time t information. Shaped (k_regimes, nobs).
predicted_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t-1}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t-1
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
joint_likelihoods : array
Array of likelihoods conditional on time t information, shaped (nobs,).
filtered_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
"""
# Dimensions
k_regimes = len(initial_probabilities)
nobs = conditional_likelihoods.shape[-1]
order = conditional_likelihoods.ndim - 2
dtype = conditional_likelihoods.dtype
# Storage
# Pr[S_t = s_t | Y_t]
filtered_marginal_probabilities = (
np.zeros((k_regimes, nobs), dtype=dtype))
# Pr[S_t = s_t, ... S_{t-r} = s_{t-r} | Y_{t-1}]
# Has k_regimes^(order+1) elements
predicted_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs,), dtype=dtype)
# f(y_t | Y_{t-1})
joint_likelihoods = np.zeros((nobs,), dtype)
# Pr[S_t = s_t, ... S_{t-r} = s_{t-r} | Y_t]
# Has k_regimes^(order+1) elements
filtered_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs + 1,), dtype=dtype)
# Initial probabilities
filtered_marginal_probabilities[:, 0] = initial_probabilities
tmp = np.copy(initial_probabilities)
shape = (k_regimes, k_regimes)
transition_t = 0
for i in range(order):
if regime_transition.shape[-1] > 1:
transition_t = i
tmp = np.reshape(regime_transition[..., transition_t],
shape + (1,) * i) * tmp
filtered_joint_probabilities[..., 0] = tmp
# Get appropriate subset of transition matrix
if regime_transition.shape[-1] > 1:
regime_transition = regime_transition[..., order:]
# Run Cython filter iterations
prefix, dtype, _ = find_best_blas_type((
regime_transition, conditional_likelihoods, joint_likelihoods,
predicted_joint_probabilities, filtered_joint_probabilities))
func = prefix_hamilton_filter_map[prefix]
func(nobs, k_regimes, order, regime_transition,
conditional_likelihoods.reshape(k_regimes**(order+1), nobs),
joint_likelihoods,
predicted_joint_probabilities.reshape(k_regimes**(order+1), nobs),
filtered_joint_probabilities.reshape(k_regimes**(order+1), nobs+1))
# S_t | t
filtered_marginal_probabilities = filtered_joint_probabilities[..., 1:]
for i in range(1, filtered_marginal_probabilities.ndim - 1):
filtered_marginal_probabilities = np.sum(
filtered_marginal_probabilities, axis=-2)
return (filtered_marginal_probabilities, predicted_joint_probabilities,
joint_likelihoods, filtered_joint_probabilities[..., 1:])
def py_kim_smoother(regime_transition, predicted_joint_probabilities,
filtered_joint_probabilities):
"""
Kim smoother using pure Python
Parameters
----------
regime_transition : array
Matrix of regime transition probabilities, shaped either
(k_regimes, k_regimes, 1) or if there are time-varying transition
probabilities (k_regimes, k_regimes, nobs).
predicted_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t-1}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t-1
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
filtered_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
Returns
-------
smoothed_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_T] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on all information.
Shaped (k_regimes,) * (order + 1) + (nobs,).
smoothed_marginal_probabilities : array
Array containing Pr[S_t=s_t | Y_T] - the probability of being in each
regime conditional on all information. Shaped (k_regimes, nobs).
"""
# Dimensions
k_regimes = filtered_joint_probabilities.shape[0]
nobs = filtered_joint_probabilities.shape[-1]
order = filtered_joint_probabilities.ndim - 2
dtype = filtered_joint_probabilities.dtype
# Storage
smoothed_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs,), dtype=dtype)
smoothed_marginal_probabilities = np.zeros((k_regimes, nobs), dtype=dtype)
# S_T, S_{T-1}, ..., S_{T-r} | T
smoothed_joint_probabilities[..., -1] = (
filtered_joint_probabilities[..., -1])
# Reshape transition so we can use broadcasting
shape = (k_regimes, k_regimes)
shape += (1,) * (order)
shape += (regime_transition.shape[-1],)
regime_transition = np.reshape(regime_transition, shape)
# Get appropriate subset of transition matrix
if regime_transition.shape[-1] == nobs + order:
regime_transition = regime_transition[..., order:]
# Kim smoother iterations
transition_t = 0
for t in range(nobs - 2, -1, -1):
if regime_transition.shape[-1] > 1:
transition_t = t + 1
# S_{t+1}, S_t, ..., S_{t-r+1} | t
# x = predicted_joint_probabilities[..., t]
x = (filtered_joint_probabilities[..., t] *
regime_transition[..., transition_t])
# S_{t+1}, S_t, ..., S_{t-r+2} | T / S_{t+1}, S_t, ..., S_{t-r+2} | t
y = (smoothed_joint_probabilities[..., t+1] /
predicted_joint_probabilities[..., t+1])
# S_t, S_{t-1}, ..., S_{t-r+1} | T
smoothed_joint_probabilities[..., t] = (x * y[..., None]).sum(axis=0)
# Get smoothed marginal probabilities S_t | T by integrating out
# S_{t-k+1}, S_{t-k+2}, ..., S_{t-1}
smoothed_marginal_probabilities = smoothed_joint_probabilities
for i in range(1, smoothed_marginal_probabilities.ndim - 1):
smoothed_marginal_probabilities = np.sum(
smoothed_marginal_probabilities, axis=-2)
return smoothed_joint_probabilities, smoothed_marginal_probabilities
def cy_kim_smoother(regime_transition, predicted_joint_probabilities,
filtered_joint_probabilities):
"""
Kim smoother using Cython inner loop
Parameters
----------
regime_transition : array
Matrix of regime transition probabilities, shaped either
(k_regimes, k_regimes, 1) or if there are time-varying transition
probabilities (k_regimes, k_regimes, nobs).
predicted_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t-1}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t-1
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
filtered_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
Returns
-------
smoothed_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_T] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on all information.
Shaped (k_regimes,) * (order + 1) + (nobs,).
smoothed_marginal_probabilities : array
Array containing Pr[S_t=s_t | Y_T] - the probability of being in each
regime conditional on all information. Shaped (k_regimes, nobs).
"""
# Dimensions
k_regimes = filtered_joint_probabilities.shape[0]
nobs = filtered_joint_probabilities.shape[-1]
order = filtered_joint_probabilities.ndim - 2
dtype = filtered_joint_probabilities.dtype
# Storage
smoothed_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs,), dtype=dtype)
# Get appropriate subset of transition matrix
if regime_transition.shape[-1] == nobs + order:
regime_transition = regime_transition[..., order:]
# Run Cython smoother iterations
prefix, dtype, _ = find_best_blas_type((
regime_transition, predicted_joint_probabilities,
filtered_joint_probabilities))
func = prefix_kim_smoother_map[prefix]
func(nobs, k_regimes, order, regime_transition,
predicted_joint_probabilities.reshape(k_regimes**(order+1), nobs),
filtered_joint_probabilities.reshape(k_regimes**(order+1), nobs),
smoothed_joint_probabilities.reshape(k_regimes**(order+1), nobs))
# Get smoothed marginal probabilities S_t | T by integrating out
# S_{t-k+1}, S_{t-k+2}, ..., S_{t-1}
smoothed_marginal_probabilities = smoothed_joint_probabilities
for i in range(1, smoothed_marginal_probabilities.ndim - 1):
smoothed_marginal_probabilities = np.sum(
smoothed_marginal_probabilities, axis=-2)
return smoothed_joint_probabilities, smoothed_marginal_probabilities
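# Illustrative sketch (editorial addition): chaining the pure-Python filter
# and smoother on the same hypothetical toy problem as
# ``_example_py_hamilton_filter`` above.
def _example_py_kim_smoother():
    regime_transition = np.array([[0.9, 0.2],
                                  [0.1, 0.8]])[..., None]
    conditional_likelihoods = np.ones((2, 2, 5))
    initial = np.array([0.5, 0.5])
    _, predicted_joint, _, filtered_joint = py_hamilton_filter(
        initial, regime_transition, conditional_likelihoods)
    smoothed_joint, smoothed_marginal = py_kim_smoother(
        regime_transition, predicted_joint, filtered_joint)
    assert smoothed_joint.shape == (2, 2, 5)
    assert smoothed_marginal.shape == (2, 5)
    return smoothed_joint, smoothed_marginal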
class MarkovSwitchingParams(object):
"""
Class to hold parameters in Markov switching models
Parameters
----------
k_regimes : int
The number of regimes between which parameters may switch.
Notes
-----
The purpose is to allow selecting parameter indexes / slices based on
parameter type, regime number, or both.
Parameters are lexicographically ordered in the following way:
1. Named type string (e.g. "autoregressive")
2. Number (e.g. the first autoregressive parameter, then the second)
3. Regime (if applicable)
Parameter blocks are set using dictionary setter notation where the key
is the named type string and the value is a list of boolean values
indicating whether a given parameter is switching or not.
For example, consider the following code:
parameters = MarkovSwitchingParams(k_regimes=2)
parameters['regime_transition'] = [1,1]
parameters['exog'] = [0, 1]
This implies the model has 7 parameters: 4 "regime_transition"-related
parameters (2 parameters that each switch according to regimes) and 3
"exog"-related parameters (1 parameter that does not switch, and one 1 that
does).
The order of parameters is then:
1. The first "regime_transition" parameter, regime 0
2. The first "regime_transition" parameter, regime 1
3. The second "regime_transition" parameter, regime 1
4. The second "regime_transition" parameter, regime 1
5. The first "exog" parameter
6. The second "exog" parameter, regime 0
7. The second "exog" parameter, regime 1
Retrieving indexes / slices is done through dictionary getter notation.
There are three options for the dictionary key:
- Regime number (zero-indexed)
- Named type string (e.g. "autoregressive")
- Regime number and named type string
In the above example, consider the following getters:
>>> parameters[0]
array([0, 2, 4, 6])
>>> parameters[1]
array([1, 3, 5, 6])
>>> parameters['exog']
slice(4, 7, None)
>>> parameters[0, 'exog']
[4, 6]
>>> parameters[1, 'exog']
[4, 7]
Notice that in the last two examples, both lists of indexes include 4.
That's because that is the index of the non-switching first "exog"
parameter, which should be selected regardless of the regime.
In addition to the getter, the `k_parameters` attribute is an OrderedDict
with the named type strings as the keys. It can be used to get the total
number of parameters of each type:
>>> parameters.k_parameters['regime_transition']
4
>>> parameters.k_parameters['exog']
3
"""
def __init__(self, k_regimes):
self.k_regimes = k_regimes
self.k_params = 0
self.k_parameters = OrderedDict()
self.switching = OrderedDict()
self.slices_purpose = OrderedDict()
self.relative_index_regime_purpose = [
OrderedDict() for i in range(self.k_regimes)]
self.index_regime_purpose = [
OrderedDict() for i in range(self.k_regimes)]
self.index_regime = [[] for i in range(self.k_regimes)]
def __getitem__(self, key):
_type = type(key)
# Get a slice for a block of parameters by purpose
if _type is str:
return self.slices_purpose[key]
# Get a slice for a block of parameters by regime
elif _type is int:
return self.index_regime[key]
elif _type is tuple:
if not len(key) == 2:
raise IndexError('Invalid index')
if type(key[1]) == str and type(key[0]) == int:
return self.index_regime_purpose[key[0]][key[1]]
elif type(key[0]) == str and type(key[1]) == int:
return self.index_regime_purpose[key[1]][key[0]]
else:
raise IndexError('Invalid index')
else:
raise IndexError('Invalid index')
def __setitem__(self, key, value):
_type = type(key)
if _type is str:
value = np.array(value, dtype=bool, ndmin=1)
k_params = self.k_params
self.k_parameters[key] = (
value.size + np.sum(value) * (self.k_regimes - 1))
self.k_params += self.k_parameters[key]
self.switching[key] = value
self.slices_purpose[key] = np.s_[k_params:self.k_params]
for j in range(self.k_regimes):
self.relative_index_regime_purpose[j][key] = []
self.index_regime_purpose[j][key] = []
offset = 0
for i in range(value.size):
switching = value[i]
for j in range(self.k_regimes):
# Non-switching parameters
if not switching:
self.relative_index_regime_purpose[j][key].append(
offset)
# Switching parameters
else:
self.relative_index_regime_purpose[j][key].append(
offset + j)
offset += 1 if not switching else self.k_regimes
for j in range(self.k_regimes):
offset = 0
indices = []
for k, v in self.relative_index_regime_purpose[j].items():
v = (np.r_[v] + offset).tolist()
self.index_regime_purpose[j][k] = v
indices.append(v)
offset += self.k_parameters[k]
self.index_regime[j] = np.concatenate(indices).astype(int)
else:
raise IndexError('Invalid index')
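# Illustrative sketch (editorial addition): reproducing the indexing example
# from the ``MarkovSwitchingParams`` docstring above.
def _example_markov_switching_params():
    parameters = MarkovSwitchingParams(k_regimes=2)
    parameters['regime_transition'] = [1, 1]
    parameters['exog'] = [0, 1]
    assert list(parameters[0]) == [0, 2, 4, 5]
    assert list(parameters[1]) == [1, 3, 4, 6]
    assert parameters['exog'] == np.s_[4:7]
    assert parameters[0, 'exog'] == [4, 5]
    assert parameters[1, 'exog'] == [4, 6]
    return parameters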
class MarkovSwitching(tsbase.TimeSeriesModel):
"""
First-order k-regime Markov switching model
Parameters
----------
endog : array_like
The endogenous variable.
k_regimes : integer
The number of regimes.
order : integer, optional
The order of the model describes the dependence of the likelihood on
previous regimes. This depends on the model in question and should be
set appropriately by subclasses.
exog_tvtp : array_like, optional
Array of exogenous or lagged variables to use in calculating
time-varying transition probabilities (TVTP). TVTP is only used if this
variable is provided. If an intercept is desired, a column of ones must
be explicitly included in this array.
Notes
-----
This model is new and API stability is not guaranteed, although changes
will be made in a backwards compatible way if possible.
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
"""
def __init__(self, endog, k_regimes, order=0, exog_tvtp=None, exog=None,
dates=None, freq=None, missing='none'):
# Properties
self.k_regimes = k_regimes
self.tvtp = exog_tvtp is not None
# The order of the model may be overridden in subclasses
self.order = order
# Exogenous data
# TODO add checks for exog_tvtp consistent shape and indices
self.k_tvtp, self.exog_tvtp = _prepare_exog(exog_tvtp)
# Initialize the base model
super(MarkovSwitching, self).__init__(endog, exog, dates=dates,
freq=freq, missing=missing)
# Dimensions
self.nobs = self.endog.shape[0]
# Sanity checks
if self.endog.ndim > 1 and self.endog.shape[1] > 1:
raise ValueError('Must have univariate endogenous data.')
if self.k_regimes < 2:
raise ValueError('Markov switching models must have at least two'
' regimes.')
if not(self.exog_tvtp is None or self.exog_tvtp.shape[0] == self.nobs):
raise ValueError('Time-varying transition probabilities exogenous'
' array must have the same number of observations'
' as the endogenous array.')
# Parameters
self.parameters = MarkovSwitchingParams(self.k_regimes)
k_transition = self.k_regimes - 1
if self.tvtp:
k_transition *= self.k_tvtp
self.parameters['regime_transition'] = [1] * k_transition
# Internal model properties: default is steady-state initialization
self._initialization = 'steady-state'
self._initial_probabilities = None
@property
def k_params(self):
"""
(int) Number of parameters in the model
"""
return self.parameters.k_params
def initialize_steady_state(self):
"""
Set initialization of regime probabilities to be steady-state values
Notes
-----
Only valid if there are not time-varying transition probabilities.
"""
if self.tvtp:
raise ValueError('Cannot use steady-state initialization when'
' the regime transition matrix is time-varying.')
self._initialization = 'steady-state'
self._initial_probabilities = None
def initialize_known(self, probabilities, tol=1e-8):
"""
Set initialization of regime probabilities to use known values
"""
self._initialization = 'known'
probabilities = np.array(probabilities, ndmin=1)
if not probabilities.shape == (self.k_regimes,):
raise ValueError('Initial probabilities must be a vector of shape'
' (k_regimes,).')
if not np.abs(np.sum(probabilities) - 1) < tol:
raise ValueError('Initial probabilities vector must sum to one.')
self._initial_probabilities = probabilities
def initial_probabilities(self, params, regime_transition=None):
"""
Retrieve initial probabilities
"""
params = np.array(params, ndmin=1)
if self._initialization == 'steady-state':
if regime_transition is None:
regime_transition = self.regime_transition_matrix(params)
if regime_transition.ndim == 3:
regime_transition = regime_transition[..., 0]
m = regime_transition.shape[0]
A = np.c_[(np.eye(m) - regime_transition).T, np.ones(m)].T
try:
probabilities = np.linalg.pinv(A)[:, -1]
except np.linalg.LinAlgError:
raise RuntimeError('Steady-state probabilities could not be'
' constructed.')
elif self._initialization == 'known':
probabilities = self._initial_probabilities
else:
raise RuntimeError('Invalid initialization method selected.')
return probabilities
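# Illustrative note (editorial addition): under the steady-state
# initialization above, a constant left-stochastic transition matrix such as
# [[0.9, 0.2], [0.1, 0.8]] yields initial probabilities [2/3, 1/3], i.e. the
# vector pi solving P pi = pi subject to pi summing to one.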
def _regime_transition_matrix_tvtp(self, params, exog_tvtp=None):
if exog_tvtp is None:
exog_tvtp = self.exog_tvtp
nobs = len(exog_tvtp)
regime_transition_matrix = np.zeros(
(self.k_regimes, self.k_regimes, nobs),
dtype=np.promote_types(np.float64, params.dtype))
# Compute the predicted values from the regression
for i in range(self.k_regimes):
coeffs = params[self.parameters[i, 'regime_transition']]
regime_transition_matrix[:-1, i, :] = np.dot(
exog_tvtp,
np.reshape(coeffs, (self.k_regimes-1, self.k_tvtp)).T).T
# Perform the logistic transformation
tmp = np.c_[np.zeros((nobs, self.k_regimes, 1)),
regime_transition_matrix[:-1, :, :].T].T
regime_transition_matrix[:-1, :, :] = np.exp(
regime_transition_matrix[:-1, :, :] - logsumexp(tmp, axis=0))
# Compute the last column of the transition matrix
regime_transition_matrix[-1, :, :] = (
1 - np.sum(regime_transition_matrix[:-1, :, :], axis=0))
return regime_transition_matrix
def regime_transition_matrix(self, params, exog_tvtp=None):
"""
Construct the left-stochastic transition matrix
Notes
-----
This matrix will either be shaped (k_regimes, k_regimes, 1) or if there
are time-varying transition probabilities, it will be shaped
(k_regimes, k_regimes, nobs).
The (i,j)th element of this matrix is the probability of transitioning
from regime j to regime i; thus the previous regime is represented in a
column and the next regime is represented by a row.
It is left-stochastic, meaning that each column sums to one (because
it is certain that from one regime (j) you will transition to *some
other regime*).
"""
params = np.array(params, ndmin=1)
if not self.tvtp:
regime_transition_matrix = np.zeros(
(self.k_regimes, self.k_regimes, 1),
dtype=np.promote_types(np.float64, params.dtype))
regime_transition_matrix[:-1, :, 0] = np.reshape(
params[self.parameters['regime_transition']],
(self.k_regimes-1, self.k_regimes))
regime_transition_matrix[-1, :, 0] = (
1 - np.sum(regime_transition_matrix[:-1, :, 0], axis=0))
else:
regime_transition_matrix = (
self._regime_transition_matrix_tvtp(params, exog_tvtp))
return regime_transition_matrix
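# Illustrative note (editorial addition): for a two-regime model without TVTP
# the transition parameters are (p[0->0], p[1->0]) and the returned array,
# dropping the trailing length-one time axis, is
#
#     [[p[0->0],     p[1->0]    ],
#      [1 - p[0->0], 1 - p[1->0]]]
#
# so each column sums to one, as described in the docstring.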
def predict(self, params, start=None, end=None, probabilities=None,
conditional=False):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
params : array
Parameters at which to form predictions
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
probabilities : string or array_like, optional
Specifies the weighting probabilities used in constructing the
prediction as a weighted average. If a string, can be 'predicted',
'filtered', or 'smoothed'. Otherwise can be an array of
probabilities to use. Default is smoothed.
conditional : boolean or int, optional
Whether or not to return predictions conditional on current or
past regimes. If False, returns a single vector of weighted
predictions. If True or 1, returns predictions conditional on the
current regime. For larger integers, returns predictions
conditional on the current regime and some number of past regimes.
Returns
-------
predict : array
Array of out of in-sample predictions and / or out-of-sample
forecasts.
"""
if start is None:
start = self._index[0]
# Handle start, end
start, end, out_of_sample, prediction_index = (
self._get_prediction_index(start, end))
if out_of_sample > 0:
raise NotImplementedError
# Perform in-sample prediction
predict = self.predict_conditional(params)
squeezed = np.squeeze(predict)
# Check if we need to do weighted averaging
if squeezed.ndim - 1 > conditional:
# Determine in-sample weighting probabilities
if probabilities is None or probabilities == 'smoothed':
results = self.smooth(params, return_raw=True)
probabilities = results.smoothed_joint_probabilities
elif probabilities == 'filtered':
results = self.filter(params, return_raw=True)
probabilities = results.filtered_joint_probabilities
elif probabilities == 'predicted':
results = self.filter(params, return_raw=True)
probabilities = results.predicted_joint_probabilities
# Compute weighted average
predict = (predict * probabilities)
for i in range(predict.ndim - 1 - int(conditional)):
predict = np.sum(predict, axis=-2)
else:
predict = squeezed
return predict[start:end + out_of_sample + 1]
def predict_conditional(self, params):
"""
In-sample prediction, conditional on the current, and possibly past,
regimes
Parameters
----------
params : array_like
Array of parameters at which to perform prediction.
Returns
-------
predict : array_like
Array of predictions conditional on current, and possibly past,
regimes
"""
raise NotImplementedError
def _conditional_likelihoods(self, params):
"""
Compute likelihoods conditional on the current period's regime (and
the last self.order periods' regimes if self.order > 0).
Must be implemented in subclasses.
"""
raise NotImplementedError
def _filter(self, params, regime_transition=None):
# Get the regime transition matrix if not provided
if regime_transition is None:
regime_transition = self.regime_transition_matrix(params)
# Get the initial probabilities
initial_probabilities = self.initial_probabilities(
params, regime_transition)
# Compute the conditional likelihoods
conditional_likelihoods = self._conditional_likelihoods(params)
# Apply the filter
return ((regime_transition, initial_probabilities,
conditional_likelihoods) +
cy_hamilton_filter(initial_probabilities, regime_transition,
conditional_likelihoods))
def filter(self, params, transformed=True, cov_type=None, cov_kwds=None,
return_raw=False, results_class=None,
results_wrapper_class=None):
"""
Apply the Hamilton filter
Parameters
----------
params : array_like
Array of parameters at which to perform filtering.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
cov_type : str, optional
See `fit` for a description of covariance matrix types
for results object.
cov_kwds : dict or None, optional
See `fit` for a description of required keywords for alternative
covariance estimators
return_raw : boolean, optional
Whether or not to return only the raw Hamilton filter output or a
full results object. Default is to return a full results object.
results_class : type, optional
A results class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
results_wrapper_class : type, optional
A results wrapper class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
Returns
-------
MarkovSwitchingResults
"""
params = np.array(params, ndmin=1)
if not transformed:
params = self.transform_params(params)
# Save the parameter names
self.data.param_names = self.param_names
# Get the result
names = ['regime_transition', 'initial_probabilities',
'conditional_likelihoods', 'filtered_marginal_probabilities',
'predicted_joint_probabilities', 'joint_likelihoods',
'filtered_joint_probabilities']
result = HamiltonFilterResults(
self, Bunch(**dict(zip(names, self._filter(params)))))
# Wrap in a results object
if not return_raw:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
if cov_kwds is not None:
result_kwargs['cov_kwds'] = cov_kwds
if results_class is None:
results_class = MarkovSwitchingResults
if results_wrapper_class is None:
results_wrapper_class = MarkovSwitchingResultsWrapper
result = results_wrapper_class(
results_class(self, params, result, **result_kwargs)
)
return result
def _smooth(self, params, filtered_marginal_probabilities,
predicted_joint_probabilities,
filtered_joint_probabilities, regime_transition=None):
# Get the regime transition matrix
if regime_transition is None:
regime_transition = self.regime_transition_matrix(params)
# Apply the smoother
return cy_kim_smoother(regime_transition,
predicted_joint_probabilities,
filtered_joint_probabilities)
def smooth(self, params, transformed=True, cov_type=None, cov_kwds=None,
return_raw=False, results_class=None,
results_wrapper_class=None):
"""
Apply the Kim smoother and Hamilton filter
Parameters
----------
params : array_like
Array of parameters at which to perform filtering.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
cov_type : str, optional
See `fit` for a description of covariance matrix types
for results object.
cov_kwds : dict or None, optional
See `fit` for a description of required keywords for alternative
covariance estimators
return_raw : boolean, optional
Whether or not to return only the raw Hamilton filter output or a
full results object. Default is to return a full results object.
results_class : type, optional
A results class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
results_wrapper_class : type, optional
A results wrapper class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
Returns
-------
MarkovSwitchingResults
"""
params = np.array(params, ndmin=1)
if not transformed:
params = self.transform_params(params)
# Save the parameter names
self.data.param_names = self.param_names
# Hamilton filter
names = ['regime_transition', 'initial_probabilities',
'conditional_likelihoods', 'filtered_marginal_probabilities',
'predicted_joint_probabilities', 'joint_likelihoods',
'filtered_joint_probabilities']
result = Bunch(**dict(zip(names, self._filter(params))))
# Kim smoother
out = self._smooth(params, result.filtered_marginal_probabilities,
result.predicted_joint_probabilities,
result.filtered_joint_probabilities)
result['smoothed_joint_probabilities'] = out[0]
result['smoothed_marginal_probabilities'] = out[1]
result = KimSmootherResults(self, result)
# Wrap in a results object
if not return_raw:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
if cov_kwds is not None:
result_kwargs['cov_kwds'] = cov_kwds
if results_class is None:
results_class = MarkovSwitchingResults
if results_wrapper_class is None:
results_wrapper_class = MarkovSwitchingResultsWrapper
result = results_wrapper_class(
results_class(self, params, result, **result_kwargs)
)
return result
def loglikeobs(self, params, transformed=True):
"""
Loglikelihood evaluation for each period
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
"""
params = np.array(params, ndmin=1)
if not transformed:
params = self.transform_params(params)
results = self._filter(params)
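# results[5] is the joint_likelihoods array, f(y_t | Y_{t-1}), returned by
# the _filter call above.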
return np.log(results[5])
def loglike(self, params, transformed=True):
"""
Loglikelihood evaluation
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
"""
return np.sum(self.loglikeobs(params, transformed))
def score(self, params, transformed=True):
"""
Compute the score function at params.
Parameters
----------
params : array_like
Array of parameters at which to evaluate the score
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
"""
params = np.array(params, ndmin=1)
return approx_fprime_cs(params, self.loglike, args=(transformed,))
def score_obs(self, params, transformed=True):
"""
Compute the score per observation, evaluated at params
Parameters
----------
params : array_like
Array of parameters at which to evaluate the score
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
"""
params = np.array(params, ndmin=1)
return approx_fprime_cs(params, self.loglikeobs, args=(transformed,))
def hessian(self, params, transformed=True):
"""
Hessian matrix of the likelihood function, evaluated at the given
parameters
Parameters
----------
params : array_like
Array of parameters at which to evaluate the Hessian
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
"""
params = np.array(params, ndmin=1)
return approx_hess_cs(params, self.loglike)
def fit(self, start_params=None, transformed=True, cov_type='approx',
cov_kwds=None, method='bfgs', maxiter=100, full_output=1, disp=0,
callback=None, return_params=False, em_iter=5, search_reps=0,
search_iter=5, search_scale=1., **kwargs):
"""
Fits the model by maximum likelihood via Hamilton filter.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
transformed : boolean, optional
Whether or not `start_params` is already transformed. Default is
True.
cov_type : str, optional
The type of covariance matrix estimator to use. Can be one of
'approx', 'opg', 'robust', or 'none'. Default is 'approx'.
cov_kwds : dict or None, optional
Keywords for alternative covariance estimators
method : str, optional
The `method` determines which solver from `scipy.optimize`
is used, and it can be chosen from among the following strings:
- 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
- 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
- 'lbfgs' for limited-memory BFGS with optional box constraints
- 'powell' for modified Powell's method
- 'cg' for conjugate gradient
- 'ncg' for Newton-conjugate gradient
- 'basinhopping' for global basin-hopping solver
The explicit arguments in `fit` are passed to the solver,
with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int, optional
The maximum number of iterations to perform.
full_output : boolean, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : boolean, optional
Set to True to print convergence messages.
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
return_params : boolean, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
em_iter : int, optional
Number of initial EM iteration steps used to improve starting
parameters.
search_reps : int, optional
Number of randomly drawn search parameters that are drawn around
`start_params` to try and improve starting parameters. Default is
0.
search_iter : int, optional
Number of initial EM iteration steps used to improve each of the
search parameter repetitions.
search_scale : float or array, optional.
Scale of variates for random start parameter search.
**kwargs
Additional keyword arguments to pass to the optimizer.
Returns
-------
MarkovSwitchingResults
"""
if start_params is None:
start_params = self.start_params
transformed = True
else:
start_params = np.array(start_params, ndmin=1)
# Random search for better start parameters
if search_reps > 0:
start_params = self._start_params_search(
search_reps, start_params=start_params,
transformed=transformed, em_iter=search_iter,
scale=search_scale)
transformed = True
# Get better start params through EM algorithm
if em_iter and not self.tvtp:
start_params = self._fit_em(start_params, transformed=transformed,
maxiter=em_iter, tolerance=0,
return_params=True)
transformed = True
if transformed:
start_params = self.untransform_params(start_params)
# Maximum likelihood estimation by scoring
fargs = (False,)
mlefit = super(MarkovSwitching, self).fit(start_params, method=method,
fargs=fargs,
maxiter=maxiter,
full_output=full_output,
disp=disp, callback=callback,
skip_hessian=True, **kwargs)
# Just return the fitted parameters if requested
if return_params:
result = self.transform_params(mlefit.params)
# Otherwise construct the results class if desired
else:
result = self.smooth(mlefit.params, transformed=False,
cov_type=cov_type, cov_kwds=cov_kwds)
result.mlefit = mlefit
result.mle_retvals = mlefit.mle_retvals
result.mle_settings = mlefit.mle_settings
return result
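# Illustrative sketch (editorial addition, hypothetical): ``fit`` is normally
# called on a concrete subclass rather than on this base class. Assuming a
# MarkovRegression subclass and a univariate series ``y``, a session might
# look like
#
#     mod = MarkovRegression(y, k_regimes=2)
#     res = mod.fit(em_iter=10, search_reps=20)
#     print(res.summary())
#
# ``em_iter`` and ``search_reps`` only affect the starting values; the final
# estimates come from maximum likelihood by scoring, as documented above.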
def _fit_em(self, start_params=None, transformed=True, cov_type='none',
cov_kwds=None, maxiter=50, tolerance=1e-6, full_output=True,
return_params=False, **kwargs):
"""
Fits the model using the Expectation-Maximization (EM) algorithm
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by `start_params`.
transformed : boolean, optional
Whether or not `start_params` is already transformed. Default is
True.
cov_type : str, optional
The type of covariance matrix estimator to use. Can be one of
'approx', 'opg', 'robust', or 'none'. Default is 'none'.
cov_kwds : dict or None, optional
Keywords for alternative covariance estimators
maxiter : int, optional
The maximum number of iterations to perform.
tolerance : float, optional
The iteration stops when the difference between subsequent
loglikelihood values is less than this tolerance.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. This includes all intermediate values for
parameters and loglikelihood values
return_params : boolean, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
**kwargs
Additional keyword arguments to pass to the optimizer.
Notes
-----
This is a private method for finding good starting parameters for MLE
by scoring. It has not been tested for a thoroughly correct EM
implementation in all cases. It does not support TVTP transition
probabilities.
Returns
-------
MarkovSwitchingResults
"""
if start_params is None:
start_params = self.start_params
transformed = True
else:
start_params = np.array(start_params, ndmin=1)
if not transformed:
start_params = self.transform_params(start_params)
# Perform expectation-maximization
llf = []
params = [start_params]
i = 0
delta = 0
while i < maxiter and (i < 2 or (delta > tolerance)):
out = self._em_iteration(params[-1])
llf.append(out[0].llf)
params.append(out[1])
if i > 0:
delta = 2 * (llf[-1] - llf[-2]) / np.abs((llf[-1] + llf[-2]))
i += 1
# Just return the fitted parameters if requested
if return_params:
result = params[-1]
# Otherwise construct the results class if desired
else:
result = self.filter(params[-1], transformed=True,
cov_type=cov_type, cov_kwds=cov_kwds)
# Save the output
if full_output:
em_retvals = Bunch(**{'params': np.array(params),
'llf': np.array(llf),
'iter': i})
em_settings = Bunch(**{'tolerance': tolerance,
'maxiter': maxiter})
else:
em_retvals = None
em_settings = None
result.mle_retvals = em_retvals
result.mle_settings = em_settings
return result
def _em_iteration(self, params0):
"""
EM iteration
Notes
-----
The EM iteration in this base class only performs the EM step for
non-TVTP transition probabilities.
"""
params1 = np.zeros(params0.shape,
dtype=np.promote_types(np.float64, params0.dtype))
# Smooth at the given parameters
result = self.smooth(params0, transformed=True, return_raw=True)
# The EM with TVTP is not yet supported, just return the previous
# iteration parameters
if self.tvtp:
params1[self.parameters['regime_transition']] = (
params0[self.parameters['regime_transition']])
else:
regime_transition = self._em_regime_transition(result)
for i in range(self.k_regimes):
params1[self.parameters[i, 'regime_transition']] = (
regime_transition[i])
return result, params1
def _em_regime_transition(self, result):
"""
EM step for regime transition probabilities
"""
# Marginalize the smoothed joint probabilities to just S_t, S_{t-1} | T
tmp = result.smoothed_joint_probabilities
for i in range(tmp.ndim - 3):
tmp = np.sum(tmp, -2)
smoothed_joint_probabilities = tmp
# Transition parameters (recall we're not yet supporting TVTP here)
k_transition = len(self.parameters[0, 'regime_transition'])
regime_transition = np.zeros((self.k_regimes, k_transition))
for i in range(self.k_regimes): # S_{t_1}
for j in range(self.k_regimes - 1): # S_t
regime_transition[i, j] = (
np.sum(smoothed_joint_probabilities[j, i]) /
np.sum(result.smoothed_marginal_probabilities[i]))
# It may be the case that due to rounding error this estimates
# transition probabilities that sum to greater than one. If so,
# re-scale the probabilities and warn the user that something
# is not quite right
delta = np.sum(regime_transition[i]) - 1
if delta > 0:
warnings.warn('Invalid regime transition probabilities'
' estimated in EM iteration; probabilities have'
' been re-scaled to continue estimation.',
EstimationWarning)
regime_transition[i] /= 1 + delta + 1e-6
return regime_transition
def _start_params_search(self, reps, start_params=None, transformed=True,
em_iter=5, scale=1.):
"""
Search for starting parameters as random permutations of a vector
Parameters
----------
reps : int
Number of random permutations to try.
start_params : array, optional
Starting parameter vector. If not given, class-level start
parameters are used.
transformed : boolean, optional
If `start_params` was provided, whether or not those parameters
are already transformed. Default is True.
em_iter : int, optional
Number of EM iterations to apply to each random permutation.
scale : array or float, optional
Scale of variates for random start parameter search. Can be given
as an array of length equal to the number of parameters or as a
single scalar.
Notes
-----
This is a private method for finding good starting parameters for MLE
by scoring, where the defaults have been set heuristically.
"""
if start_params is None:
start_params = self.start_params
transformed = True
else:
start_params = np.array(start_params, ndmin=1)
# Random search is over untransformed space
if transformed:
start_params = self.untransform_params(start_params)
# Construct the standard deviations
scale = np.array(scale, ndmin=1)
if scale.size == 1:
scale = np.ones(self.k_params) * scale
if not scale.size == self.k_params:
raise ValueError('Scale of variates for random start'
' parameter search must be given for each'
' parameter or as a single scalar.')
# Construct the random variates
variates = np.zeros((reps, self.k_params))
for i in range(self.k_params):
variates[:, i] = scale[i] * np.random.uniform(-0.5, 0.5, size=reps)
llf = self.loglike(start_params, transformed=False)
params = start_params
for i in range(reps):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
proposed_params = self._fit_em(
start_params + variates[i], transformed=False,
maxiter=em_iter, return_params=True)
proposed_llf = self.loglike(proposed_params)
if proposed_llf > llf:
llf = proposed_llf
params = self.untransform_params(proposed_params)
except:
pass
# Return transformed parameters
return self.transform_params(params)
@property
def start_params(self):
"""
(array) Starting parameters for maximum likelihood estimation.
"""
params = np.zeros(self.k_params, dtype=np.float64)
# Transition probabilities
if self.tvtp:
params[self.parameters['regime_transition']] = 0.
else:
params[self.parameters['regime_transition']] = 1. / self.k_regimes
return params
@property
def param_names(self):
"""
(list of str) List of human readable parameter names (for parameters
actually included in the model).
"""
param_names = np.zeros(self.k_params, dtype=object)
# Transition probabilities
if self.tvtp:
# TODO add support for exog_tvtp_names
param_names[self.parameters['regime_transition']] = [
'p[%d->%d].tvtp%d' % (j, i, k)
for i in range(self.k_regimes-1)
for k in range(self.k_tvtp)
for j in range(self.k_regimes)
]
else:
param_names[self.parameters['regime_transition']] = [
'p[%d->%d]' % (j, i)
for i in range(self.k_regimes-1)
for j in range(self.k_regimes)]
return param_names.tolist()
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
evaluation.
Notes
-----
In the base class, this only transforms the transition-probability-
related parameters.
"""
constrained = np.array(unconstrained, copy=True)
constrained = constrained.astype(
np.promote_types(np.float64, constrained.dtype))
# Nothing to do for transition probabilities if TVTP
if self.tvtp:
constrained[self.parameters['regime_transition']] = (
unconstrained[self.parameters['regime_transition']])
# Otherwise do logistic transformation
else:
# Transition probabilities
for i in range(self.k_regimes):
tmp1 = unconstrained[self.parameters[i, 'regime_transition']]
tmp2 = np.r_[0, tmp1]
constrained[self.parameters[i, 'regime_transition']] = np.exp(
tmp1 - logsumexp(tmp2))
# Do not do anything for the rest of the parameters
return constrained
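# Illustrative note (editorial addition): in the two-regime, non-TVTP case
# the mapping above sends an unconstrained value x for a transition
# probability to exp(x) / (1 + exp(x)), so x = 0 corresponds to a constrained
# probability of 0.5.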
def _untransform_logistic(self, unconstrained, constrained):
"""
Function to allow using a numerical root-finder to reverse the
logistic transform.
"""
resid = np.zeros(unconstrained.shape, dtype=unconstrained.dtype)
exp = np.exp(unconstrained)
sum_exp = np.sum(exp)
for i in range(len(unconstrained)):
resid[i] = (unconstrained[i] -
np.log(1 + sum_exp - exp[i]) +
np.log(1 / constrained[i] - 1))
return resid
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array_like
Array of constrained parameters used in likelihood evaluation, to be
transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
Notes
-----
In the base class, this only untransforms the transition-probability-
related parameters.
"""
unconstrained = np.array(constrained, copy=True)
unconstrained = unconstrained.astype(
np.promote_types(np.float64, unconstrained.dtype))
# Nothing to do for transition probabilities if TVTP
if self.tvtp:
unconstrained[self.parameters['regime_transition']] = (
constrained[self.parameters['regime_transition']])
# Otherwise reverse logistic transformation
else:
for i in range(self.k_regimes):
s = self.parameters[i, 'regime_transition']
if self.k_regimes == 2:
unconstrained[s] = -np.log(1. / constrained[s] - 1)
else:
from scipy.optimize import root
out = root(self._untransform_logistic,
np.zeros(unconstrained[s].shape,
unconstrained.dtype),
args=(constrained[s],))
if not out['success']:
raise ValueError('Could not untransform parameters.')
unconstrained[s] = out['x']
# Do not do anything for the rest of the parameters
return unconstrained
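
# Illustrative sketch (not part of the original statsmodels source): the
# transform_params / untransform_params pair above maps unconstrained
# optimizer values to transition probabilities via a multinomial-logit
# transform with an implicit reference regime fixed at zero. The helper
# below is hypothetical and exists only to demonstrate that mapping.
def _demo_transition_transform(unconstrained=None):
    import numpy as np
    from scipy.special import logsumexp

    if unconstrained is None:
        unconstrained = np.array([0.5, -1.0])  # k_regimes - 1 free values
    with_reference = np.r_[0, unconstrained]   # reference category fixed at 0
    constrained = np.exp(unconstrained - logsumexp(with_reference))
    # Probability mass left over for the implicit reference regime
    reference_prob = np.exp(-logsumexp(with_reference))
    assert np.isclose(constrained.sum() + reference_prob, 1.0)
    return constrained, reference_prob
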
class HamiltonFilterResults(object):
"""
Results from applying the Hamilton filter to a state space model.
Parameters
----------
model : Representation
A Statespace representation
Attributes
----------
nobs : int
Number of observations.
k_endog : int
The dimension of the observation series.
k_regimes : int
The number of unobserved regimes.
regime_transition : array
The regime transition matrix.
initialization : str
Initialization method for regime probabilities.
initial_probabilities : array
Initial regime probabilities
conditional_likelihoods : array
The likelihood values at each time period, conditional on regime.
predicted_joint_probabilities : array
Predicted joint probabilities at each time period.
filtered_marginal_probabilities : array
Filtered marginal probabilities at each time period.
filtered_joint_probabilities : array
Filtered joint probabilities at each time period.
joint_likelihoods : array
The likelihood values at each time period.
llf_obs : array
The loglikelihood values at each time period.
"""
def __init__(self, model, result):
self.model = model
self.nobs = model.nobs
self.order = model.order
self.k_regimes = model.k_regimes
attributes = ['regime_transition', 'initial_probabilities',
'conditional_likelihoods',
'predicted_joint_probabilities',
'filtered_marginal_probabilities',
'filtered_joint_probabilities',
'joint_likelihoods']
for name in attributes:
setattr(self, name, getattr(result, name))
self.initialization = model._initialization
self.llf_obs = np.log(self.joint_likelihoods)
self.llf = np.sum(self.llf_obs)
# Subset transition if necessary (e.g. for Markov autoregression)
if self.regime_transition.shape[-1] > 1 and self.order > 0:
self.regime_transition = self.regime_transition[..., self.order:]
# Cache for predicted marginal probabilities
self._predicted_marginal_probabilities = None
@property
def predicted_marginal_probabilities(self):
if self._predicted_marginal_probabilities is None:
self._predicted_marginal_probabilities = (
self.predicted_joint_probabilities)
for i in range(self._predicted_marginal_probabilities.ndim - 2):
self._predicted_marginal_probabilities = np.sum(
self._predicted_marginal_probabilities, axis=-2)
return self._predicted_marginal_probabilities
@property
def expected_durations(self):
"""
(array) Expected duration of a regime, possibly time-varying.
"""
return 1. / (1 - np.diagonal(self.regime_transition).squeeze())
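
# Illustrative sketch (not part of the original statsmodels source): the
# expected_durations property above uses the identity that a regime with
# self-transition probability p_ii persists for 1 / (1 - p_ii) periods on
# average. The transition matrix below is hypothetical.
def _demo_expected_durations():
    import numpy as np
    # Columns sum to one: entry [i, j] is Pr(regime i at t+1 | regime j at t).
    regime_transition = np.array([[0.9, 0.2],
                                  [0.1, 0.8]])
    return 1. / (1 - np.diagonal(regime_transition))  # -> array([10., 5.])
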
class KimSmootherResults(HamiltonFilterResults):
"""
Results from applying the Kim smoother to a Markov switching model.
Parameters
----------
model : MarkovSwitchingModel
The model object.
result : dict
        A dictionary containing two keys: 'smoothed_joint_probabilities' and
'smoothed_marginal_probabilities'.
Attributes
----------
nobs : int
Number of observations.
k_endog : int
The dimension of the observation series.
k_states : int
The dimension of the unobserved state process.
"""
def __init__(self, model, result):
super(KimSmootherResults, self).__init__(model, result)
attributes = ['smoothed_joint_probabilities',
'smoothed_marginal_probabilities']
for name in attributes:
setattr(self, name, getattr(result, name))
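
# Illustrative sketch (not part of the original statsmodels source): one
# backward step of the Kim (1994) smoother whose output the class above
# stores. It reweights the filtered probabilities at time t by how well each
# regime explains the smoothed and predicted probabilities one period ahead.
# All array names here are hypothetical.
def _demo_kim_smoother_step(regime_transition, filtered_t,
                            predicted_tp1, smoothed_tp1):
    import numpy as np
    # regime_transition[i, j] = Pr(regime i at t+1 | regime j at t)
    ratio = smoothed_tp1 / predicted_tp1
    return filtered_t * np.dot(regime_transition.T, ratio)
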
class MarkovSwitchingResults(tsbase.TimeSeriesModelResults):
r"""
Class to hold results from fitting a Markov switching model
Parameters
----------
model : MarkovSwitching instance
The fitted model instance
params : array
Fitted parameters
filter_results : HamiltonFilterResults or KimSmootherResults instance
The underlying filter and, optionally, smoother output
cov_type : string
The type of covariance matrix estimator to use. Can be one of 'approx',
'opg', 'robust', or 'none'.
Attributes
----------
model : Model instance
A reference to the model that was fit.
filter_results : HamiltonFilterResults or KimSmootherResults instance
The underlying filter and, optionally, smoother output
nobs : float
The number of observations used to fit the model.
params : array
The parameters of the model.
scale : float
This is currently set to 1.0 and not used by the model or its results.
"""
use_t = False
def __init__(self, model, params, results, cov_type='opg', cov_kwds=None,
**kwargs):
self.data = model.data
tsbase.TimeSeriesModelResults.__init__(self, model, params,
normalized_cov_params=None,
scale=1.)
# Save the filter / smoother output
self.filter_results = results
if isinstance(results, KimSmootherResults):
self.smoother_results = results
else:
self.smoother_results = None
# Dimensions
self.nobs = model.nobs
self.order = model.order
self.k_regimes = model.k_regimes
# Setup covariance matrix notes dictionary
if not hasattr(self, 'cov_kwds'):
self.cov_kwds = {}
self.cov_type = cov_type
# Setup the cache
self._cache = resettable_cache()
# Handle covariance matrix calculation
if cov_kwds is None:
cov_kwds = {}
self._cov_approx_complex_step = (
cov_kwds.pop('approx_complex_step', True))
self._cov_approx_centered = cov_kwds.pop('approx_centered', False)
try:
self._rank = None
self._get_robustcov_results(cov_type=cov_type, use_self=True,
**cov_kwds)
except np.linalg.LinAlgError:
self._rank = 0
k_params = len(self.params)
self.cov_params_default = np.zeros((k_params, k_params)) * np.nan
self.cov_kwds['cov_type'] = (
                'Covariance matrix could not be calculated: singular'
' information matrix.')
# Copy over arrays
attributes = ['regime_transition', 'initial_probabilities',
'conditional_likelihoods',
'predicted_marginal_probabilities',
'predicted_joint_probabilities',
'filtered_marginal_probabilities',
'filtered_joint_probabilities',
'joint_likelihoods', 'expected_durations']
for name in attributes:
setattr(self, name, getattr(self.filter_results, name))
attributes = ['smoothed_joint_probabilities',
'smoothed_marginal_probabilities']
for name in attributes:
if self.smoother_results is not None:
setattr(self, name, getattr(self.smoother_results, name))
else:
setattr(self, name, None)
# Reshape some arrays to long-format
self.predicted_marginal_probabilities = (
self.predicted_marginal_probabilities.T)
self.filtered_marginal_probabilities = (
self.filtered_marginal_probabilities.T)
if self.smoother_results is not None:
self.smoothed_marginal_probabilities = (
self.smoothed_marginal_probabilities.T)
# Make into Pandas arrays if using Pandas data
if isinstance(self.data, PandasData):
index = self.data.row_labels
if self.expected_durations.ndim > 1:
self.expected_durations = pd.DataFrame(
self.expected_durations, index=index)
self.predicted_marginal_probabilities = pd.DataFrame(
self.predicted_marginal_probabilities, index=index)
self.filtered_marginal_probabilities = pd.DataFrame(
self.filtered_marginal_probabilities, index=index)
if self.smoother_results is not None:
self.smoothed_marginal_probabilities = pd.DataFrame(
self.smoothed_marginal_probabilities, index=index)
def _get_robustcov_results(self, cov_type='opg', **kwargs):
use_self = kwargs.pop('use_self', False)
if use_self:
res = self
else:
raise NotImplementedError
res = self.__class__(
self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
# Set the new covariance type
res.cov_type = cov_type
res.cov_kwds = {}
# Calculate the new covariance matrix
k_params = len(self.params)
if k_params == 0:
res.cov_params_default = np.zeros((0, 0))
res._rank = 0
res.cov_kwds['description'] = 'No parameters estimated.'
elif cov_type == 'none':
res.cov_params_default = np.zeros((k_params, k_params)) * np.nan
res._rank = np.nan
res.cov_kwds['description'] = 'Covariance matrix not calculated.'
elif self.cov_type == 'approx':
res.cov_params_default = res.cov_params_approx
res.cov_kwds['description'] = (
'Covariance matrix calculated using numerical'
' differentiation.')
elif self.cov_type == 'opg':
res.cov_params_default = res.cov_params_opg
res.cov_kwds['description'] = (
'Covariance matrix calculated using the outer product of'
' gradients.'
)
elif self.cov_type == 'robust':
res.cov_params_default = res.cov_params_robust
res.cov_kwds['description'] = (
'Quasi-maximum likelihood covariance matrix used for'
' robustness to some misspecifications; calculated using'
' numerical differentiation.')
else:
raise NotImplementedError('Invalid covariance matrix type.')
return res
@cache_readonly
def aic(self):
"""
(float) Akaike Information Criterion
"""
# return -2*self.llf + 2*self.params.shape[0]
return aic(self.llf, self.nobs, self.params.shape[0])
@cache_readonly
def bic(self):
"""
(float) Bayes Information Criterion
"""
# return -2*self.llf + self.params.shape[0]*np.log(self.nobs)
return bic(self.llf, self.nobs, self.params.shape[0])
@cache_readonly
def cov_params_approx(self):
"""
(array) The variance / covariance matrix. Computed using the numerical
Hessian approximated by complex step or finite differences methods.
"""
evaluated_hessian = self.model.hessian(self.params, transformed=True)
neg_cov, singular_values = pinv_extended(evaluated_hessian)
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return -neg_cov
@cache_readonly
def cov_params_opg(self):
"""
(array) The variance / covariance matrix. Computed using the outer
product of gradients method.
"""
score_obs = self.model.score_obs(self.params, transformed=True).T
cov_params, singular_values = pinv_extended(
np.inner(score_obs, score_obs))
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return cov_params
@cache_readonly
def cov_params_robust(self):
"""
(array) The QMLE variance / covariance matrix. Computed using the
        numerical Hessian and the outer product of gradients (sandwich estimator).
"""
cov_opg = self.cov_params_opg
evaluated_hessian = self.model.hessian(self.params, transformed=True)
cov_params, singular_values = pinv_extended(
np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian)
)
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return cov_params
@cache_readonly
def fittedvalues(self):
"""
(array) The predicted values of the model. An (nobs x k_endog) array.
"""
return self.model.predict(self.params)
@cache_readonly
def hqic(self):
"""
(float) Hannan-Quinn Information Criterion
"""
# return -2*self.llf + 2*np.log(np.log(self.nobs))*self.params.shape[0]
return hqic(self.llf, self.nobs, self.params.shape[0])
@cache_readonly
def llf_obs(self):
"""
        (array) The log-likelihood values evaluated at `params`, one per observation.
"""
return self.model.loglikeobs(self.params)
@cache_readonly
def llf(self):
"""
(float) The value of the log-likelihood function evaluated at `params`.
"""
return self.model.loglike(self.params)
@cache_readonly
def resid(self):
"""
(array) The model residuals. An (nobs x k_endog) array.
"""
return self.model.endog - self.fittedvalues
def predict(self, start=None, end=None, probabilities=None,
conditional=False):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
probabilities : string or array_like, optional
Specifies the weighting probabilities used in constructing the
prediction as a weighted average. If a string, can be 'predicted',
'filtered', or 'smoothed'. Otherwise can be an array of
probabilities to use. Default is smoothed.
        conditional : boolean or int, optional
Whether or not to return predictions conditional on current or
past regimes. If False, returns a single vector of weighted
predictions. If True or 1, returns predictions conditional on the
current regime. For larger integers, returns predictions
conditional on the current regime and some number of past regimes.
Returns
-------
predict : array
            Array of in-sample predictions and / or out-of-sample
forecasts. An (npredict x k_endog) array.
"""
return self.model.predict(self.params, start=start, end=end,
probabilities=probabilities,
conditional=conditional)
def forecast(self, steps=1, **kwargs):
"""
Out-of-sample forecasts
Parameters
----------
steps : int, str, or datetime, optional
If an integer, the number of steps to forecast from the end of the
sample. Can also be a date string to parse or a datetime type.
However, if the dates index does not have a fixed frequency, steps
            must be an integer. Default is 1.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts. A (steps x k_endog) array.
"""
raise NotImplementedError
def summary(self, alpha=.05, start=None, title=None, model_name=None,
display_params=True):
"""
Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals. Default is 0.05.
start : int, optional
Integer of the start observation. Default is 0.
title : str, optional
The title of the summary table.
model_name : string
The name of the model used. Default is to use model class name.
display_params : boolean, optional
Whether or not to display tables of estimated parameters. Default
is True. Usually only used internally.
Returns
-------
summary : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
from statsmodels.iolib.summary import Summary
# Model specification results
model = self.model
if title is None:
title = 'Markov Switching Model Results'
if start is None:
start = 0
if self.data.dates is not None:
dates = self.data.dates
d = dates[start]
sample = ['%02d-%02d-%02d' % (d.month, d.day, d.year)]
d = dates[-1]
sample += ['- ' + '%02d-%02d-%02d' % (d.month, d.day, d.year)]
else:
sample = [str(start), ' - ' + str(self.model.nobs)]
# Standardize the model name as a list of str
if model_name is None:
model_name = model.__class__.__name__
# Create the tables
if not isinstance(model_name, list):
model_name = [model_name]
top_left = [('Dep. Variable:', None)]
top_left.append(('Model:', [model_name[0]]))
for i in range(1, len(model_name)):
top_left.append(('', ['+ ' + model_name[i]]))
top_left += [
('Date:', None),
('Time:', None),
('Sample:', [sample[0]]),
('', [sample[1]])
]
top_right = [
('No. Observations:', [self.model.nobs]),
('Log Likelihood', ["%#5.3f" % self.llf]),
('AIC', ["%#5.3f" % self.aic]),
('BIC', ["%#5.3f" % self.bic]),
('HQIC', ["%#5.3f" % self.hqic])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
summary = Summary()
summary.add_table_2cols(self, gleft=top_left, gright=top_right,
title=title)
# Make parameters tables for each regime
from statsmodels.iolib.summary import summary_params
import re
def make_table(self, mask, title, strip_end=True):
res = (self, self.params[mask], self.bse[mask],
self.tvalues[mask], self.pvalues[mask],
self.conf_int(alpha)[mask])
param_names = [
                re.sub(r'\[\d+\]$', '', name) for name in
np.array(self.data.param_names)[mask].tolist()
]
return summary_params(res, yname=None, xname=param_names,
alpha=alpha, use_t=False, title=title)
params = model.parameters
regime_masks = [[] for i in range(model.k_regimes)]
other_masks = {}
for key, switching in params.switching.items():
k_params = len(switching)
if key == 'regime_transition':
continue
other_masks[key] = []
for i in range(k_params):
if switching[i]:
for j in range(self.k_regimes):
regime_masks[j].append(params[j, key][i])
else:
other_masks[key].append(params[0, key][i])
for i in range(self.k_regimes):
mask = regime_masks[i]
if len(mask) > 0:
table = make_table(self, mask, 'Regime %d parameters' % i)
summary.tables.append(table)
mask = []
for key, _mask in other_masks.items():
mask.extend(_mask)
if len(mask) > 0:
table = make_table(self, mask, 'Non-switching parameters')
summary.tables.append(table)
# Transition parameters
mask = params['regime_transition']
table = make_table(self, mask, 'Regime transition parameters')
summary.tables.append(table)
# Add warnings/notes, added to text format only
etext = []
if hasattr(self, 'cov_type') and 'description' in self.cov_kwds:
etext.append(self.cov_kwds['description'])
if self._rank < len(self.params):
etext.append("Covariance matrix is singular or near-singular,"
" with condition number %6.3g. Standard errors may be"
" unstable." % np.linalg.cond(self.cov_params()))
if etext:
etext = ["[{0}] {1}".format(i + 1, text)
for i, text in enumerate(etext)]
etext.insert(0, "Warnings:")
summary.add_extra_txt(etext)
return summary
class MarkovSwitchingResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'cov_params_approx': 'cov',
'cov_params_default': 'cov',
'cov_params_opg': 'cov',
'cov_params_robust': 'cov',
}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {
'forecast': 'dates',
}
_wrap_methods = wrap.union_dicts(
tsbase.TimeSeriesResultsWrapper._wrap_methods, _methods)
wrap.populate_wrapper(MarkovSwitchingResultsWrapper, MarkovSwitchingResults)
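
# Illustrative sketch (not part of the original statsmodels source): a minimal
# end-to-end use of the results machinery above through the public
# MarkovRegression model, assuming an installed statsmodels exposes it under
# statsmodels.api.tsa. The simulated series and seed are arbitrary.
if __name__ == '__main__':
    import numpy as np
    import statsmodels.api as sm

    rng = np.random.RandomState(0)
    # Two-regime series: mean 0 in the first half, mean 3 in the second.
    endog = np.r_[rng.normal(0., 1., 100), rng.normal(3., 1., 100)]
    res = sm.tsa.MarkovRegression(endog, k_regimes=2).fit()
    print(res.summary())
    print(res.expected_durations)
    print(res.smoothed_marginal_probabilities[:5])
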
| bsd-3-clause |
BruceDai003/LSTM_stock_trading | stats.py | 1 | 2782 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 10:04:01 2017
@author: brucedai
"""
import os
from time import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import norm
data_dir = './dataset/'
output_dir = './stats/'
def ROCP(f_name, df):
'''
    Read stock/futures price data from f_name, compute the percentage change
    (rate of change), and store it in df.
'''
    print('Reading data from file %s:' % f_name)
data = pd.read_csv(os.path.join(data_dir, f_name), sep='\t')
ret = data.close.pct_change()
col = f_name.split(sep='.')[0]
df[col] = ret
return df
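
# Illustrative sketch (not part of the original script): pct_change() is the
# day-over-day rate of change used in ROCP above; a tiny synthetic example
# with a hypothetical close series.
def _demo_pct_change():
    import pandas as pd
    demo = pd.DataFrame({'close': [10.0, 10.5, 10.29]})
    return demo.close.pct_change()  # NaN, 0.05, -0.02
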
def Plot_and_Savefig():
df = pd.read_csv(os.path.join(output_dir, 'stocks.csv'), index_col=0)
for col in df:
data = df[col].dropna()
mean_data = data.mean()
std_data = data.std()
skew_data = data.skew()
kurt_data = data.kurt()
        print('Daily return statistics for stock %s:' % col)
        print('Number of observations: %d' % data.shape[0])
        print('Mean:\t%.4f' % mean_data)
        print('Std dev:\t%.4f' % std_data)
        print('Skewness:\t%.4f' % skew_data)
        print('Kurtosis:\t%.4f' % kurt_data)
fig, ax = plt.subplots(1, 1)
        # Plot the corresponding fitted normal distribution
x_data = np.linspace(norm.ppf(0.0001,
loc=data.mean(), scale=data.std()),
norm.ppf(0.9999,
loc=data.mean(), scale=data.std()), 1000)
y_data = norm.pdf(x_data, loc=data.mean(), scale=data.std())
        ax.plot(x_data, y_data, 'r-', lw=2, alpha=0.6, label='Normal distribution')
        # density=True replaces the normed=True keyword removed in newer matplotlib
        ax.hist(data, bins=50, density=True, histtype='stepfilled', alpha=0.3)
        plt.title('Histogram of daily returns for stock %s' % col)
plt.savefig(os.path.join(output_dir, '%s.png' % col))
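
# Illustrative sketch (not part of the original script): pandas' skew() and
# kurt() used above return sample skewness and excess kurtosis, so both are
# approximately zero for a large normal sample. The seed is arbitrary.
def _demo_normal_moments():
    import numpy as np
    import pandas as pd
    s = pd.Series(np.random.RandomState(0).normal(size=100000))
    return s.skew(), s.kurt()  # both close to 0
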
def Extract_ROCP():
'''
    Read all price data files in data_dir, compute their percentage changes,
    and save the result to a single file in output_dir.
'''
start_time = time()
file_list = os.listdir(data_dir)
column_names = [s.split(sep='.')[0] for s in file_list]
df = pd.DataFrame(data=None, columns=column_names)
for f in file_list:
df = ROCP(f, df)
read_size = df.size
diff_time = time() - start_time
    print('Read %d return data points in total\nElapsed time: %.2f seconds' %
(read_size, diff_time))
if not(os.path.exists(output_dir)):
os.mkdir(output_dir)
df.to_csv(os.path.join(output_dir, 'stocks.csv'))
diff_time = time() - start_time
    print('Saved %d return data points in total\nElapsed time: %.2f seconds' %
(read_size, diff_time))
if __name__ == '__main__':
Plot_and_Savefig() | mit |
bundgus/python-playground | matplotlib-playground/examples/api/skewt.py | 1 | 12500 | # This serves as an intensive exercise of matplotlib's transforms
# and custom projection API. This example produces a so-called
# SkewT-logP diagram, which is a common plot in meteorology for
# displaying vertical profiles of temperature. As far as matplotlib is
# concerned, the complexity comes from having X and Y axes that are
# not orthogonal. This is handled by including a skew component to the
# basic Axes transforms. Additional complexity comes in handling the
# fact that the upper and lower X-axes have different data ranges, which
# necessitates a bunch of custom classes for ticks, spines, and the axis
# to handle this.
from matplotlib.axes import Axes
import matplotlib.transforms as transforms
import matplotlib.axis as maxis
import matplotlib.spines as mspines
import matplotlib.path as mpath
from matplotlib.projections import register_projection
# The sole purpose of this class is to look at the upper, lower, or total
# interval as appropriate and see what parts of the tick to draw, if any.
class SkewXTick(maxis.XTick):
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__name__)
lower_interval = self.axes.xaxis.lower_interval
upper_interval = self.axes.xaxis.upper_interval
if self.gridOn and transforms.interval_contains(
self.axes.xaxis.get_view_interval(), self.get_loc()):
self.gridline.draw(renderer)
if transforms.interval_contains(lower_interval, self.get_loc()):
if self.tick1On:
self.tick1line.draw(renderer)
if self.label1On:
self.label1.draw(renderer)
if transforms.interval_contains(upper_interval, self.get_loc()):
if self.tick2On:
self.tick2line.draw(renderer)
if self.label2On:
self.label2.draw(renderer)
renderer.close_group(self.__name__)
# This class exists to provide two separate sets of intervals to the tick,
# as well as create instances of the custom tick
class SkewXAxis(maxis.XAxis):
def __init__(self, *args, **kwargs):
maxis.XAxis.__init__(self, *args, **kwargs)
self.upper_interval = 0.0, 1.0
def _get_tick(self, major):
return SkewXTick(self.axes, 0, '', major=major)
@property
def lower_interval(self):
return self.axes.viewLim.intervalx
def get_view_interval(self):
return self.upper_interval[0], self.axes.viewLim.intervalx[1]
# This class exists to calculate the separate data range of the
# upper X-axis and draw the spine there. It also provides this range
# to the X-axis artist for ticking and gridlines
class SkewSpine(mspines.Spine):
def _adjust_location(self):
trans = self.axes.transDataToAxes.inverted()
if self.spine_type == 'top':
yloc = 1.0
else:
yloc = 0.0
left = trans.transform_point((0.0, yloc))[0]
right = trans.transform_point((1.0, yloc))[0]
pts = self._path.vertices
pts[0, 0] = left
pts[1, 0] = right
self.axis.upper_interval = (left, right)
# This class handles registration of the skew-xaxes as a projection as well
# as setting up the appropriate transformations. It also overrides standard
# spines and axes instances as appropriate.
class SkewXAxes(Axes):
    # The projection must specify a name. This will be used by the
# user to select the projection, i.e. ``subplot(111,
# projection='skewx')``.
name = 'skewx'
def _init_axis(self):
# Taken from Axes and modified to use our modified X-axis
self.xaxis = SkewXAxis(self)
self.spines['top'].register_axis(self.xaxis)
self.spines['bottom'].register_axis(self.xaxis)
self.yaxis = maxis.YAxis(self)
self.spines['left'].register_axis(self.yaxis)
self.spines['right'].register_axis(self.yaxis)
def _gen_axes_spines(self):
spines = {'top': SkewSpine.linear_spine(self, 'top'),
'bottom': mspines.Spine.linear_spine(self, 'bottom'),
'left': mspines.Spine.linear_spine(self, 'left'),
'right': mspines.Spine.linear_spine(self, 'right')}
return spines
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
rot = 30
# Get the standard transform setup from the Axes base class
Axes._set_lim_and_transforms(self)
# Need to put the skew in the middle, after the scale and limits,
# but before the transAxes. This way, the skew is done in Axes
# coordinates thus performing the transform around the proper origin
# We keep the pre-transAxes transform around for other users, like the
# spines for finding bounds
self.transDataToAxes = self.transScale + \
self.transLimits + transforms.Affine2D().skew_deg(rot, 0)
# Create the full transform from Data to Pixels
self.transData = self.transDataToAxes + self.transAxes
# Blended transforms like this need to have the skewing applied using
# both axes, in axes coords like before.
self._xaxis_transform = (transforms.blended_transform_factory(
self.transScale + self.transLimits,
transforms.IdentityTransform()) +
transforms.Affine2D().skew_deg(rot, 0)) + self.transAxes
# Now register the projection with matplotlib so the user can select
# it.
register_projection(SkewXAxes)
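
# Illustrative sketch (not part of the original example): the
# Affine2D().skew_deg(rot, 0) transform used above shears x by an amount
# proportional to y, which is what slants the temperature axis. A quick
# check of that behaviour on two hypothetical points:
def _demo_skew_transform():
    import numpy as np
    import matplotlib.transforms as mtransforms
    skew = mtransforms.Affine2D().skew_deg(30, 0)
    pts = np.array([[0.5, 0.0], [0.5, 1.0]])
    # At y = 0 the x value is unchanged; at y = 1 it shifts by tan(30 deg).
    return skew.transform(pts)
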
if __name__ == '__main__':
# Now make a simple example using the custom projection.
from matplotlib.ticker import ScalarFormatter, MultipleLocator
import matplotlib.pyplot as plt
from six import StringIO
import numpy as np
    # Some example data
data_txt = '''
978.0 345 7.8 0.8 61 4.16 325 14 282.7 294.6 283.4
971.0 404 7.2 0.2 61 4.01 327 17 282.7 294.2 283.4
946.7 610 5.2 -1.8 61 3.56 335 26 282.8 293.0 283.4
944.0 634 5.0 -2.0 61 3.51 336 27 282.8 292.9 283.4
925.0 798 3.4 -2.6 65 3.43 340 32 282.8 292.7 283.4
911.8 914 2.4 -2.7 69 3.46 345 37 282.9 292.9 283.5
906.0 966 2.0 -2.7 71 3.47 348 39 283.0 293.0 283.6
877.9 1219 0.4 -3.2 77 3.46 0 48 283.9 293.9 284.5
850.0 1478 -1.3 -3.7 84 3.44 0 47 284.8 294.8 285.4
841.0 1563 -1.9 -3.8 87 3.45 358 45 285.0 295.0 285.6
823.0 1736 1.4 -0.7 86 4.44 353 42 290.3 303.3 291.0
813.6 1829 4.5 1.2 80 5.17 350 40 294.5 309.8 295.4
809.0 1875 6.0 2.2 77 5.57 347 39 296.6 313.2 297.6
798.0 1988 7.4 -0.6 57 4.61 340 35 299.2 313.3 300.1
791.0 2061 7.6 -1.4 53 4.39 335 33 300.2 313.6 301.0
783.9 2134 7.0 -1.7 54 4.32 330 31 300.4 313.6 301.2
755.1 2438 4.8 -3.1 57 4.06 300 24 301.2 313.7 301.9
727.3 2743 2.5 -4.4 60 3.81 285 29 301.9 313.8 302.6
700.5 3048 0.2 -5.8 64 3.57 275 31 302.7 313.8 303.3
700.0 3054 0.2 -5.8 64 3.56 280 31 302.7 313.8 303.3
698.0 3077 0.0 -6.0 64 3.52 280 31 302.7 313.7 303.4
687.0 3204 -0.1 -7.1 59 3.28 281 31 304.0 314.3 304.6
648.9 3658 -3.2 -10.9 55 2.59 285 30 305.5 313.8 305.9
631.0 3881 -4.7 -12.7 54 2.29 289 33 306.2 313.6 306.6
600.7 4267 -6.4 -16.7 44 1.73 295 39 308.6 314.3 308.9
592.0 4381 -6.9 -17.9 41 1.59 297 41 309.3 314.6 309.6
577.6 4572 -8.1 -19.6 39 1.41 300 44 310.1 314.9 310.3
555.3 4877 -10.0 -22.3 36 1.16 295 39 311.3 315.3 311.5
536.0 5151 -11.7 -24.7 33 0.97 304 39 312.4 315.8 312.6
533.8 5182 -11.9 -25.0 33 0.95 305 39 312.5 315.8 312.7
500.0 5680 -15.9 -29.9 29 0.64 290 44 313.6 315.9 313.7
472.3 6096 -19.7 -33.4 28 0.49 285 46 314.1 315.8 314.1
453.0 6401 -22.4 -36.0 28 0.39 300 50 314.4 315.8 314.4
400.0 7310 -30.7 -43.7 27 0.20 285 44 315.0 315.8 315.0
399.7 7315 -30.8 -43.8 27 0.20 285 44 315.0 315.8 315.0
387.0 7543 -33.1 -46.1 26 0.16 281 47 314.9 315.5 314.9
382.7 7620 -33.8 -46.8 26 0.15 280 48 315.0 315.6 315.0
342.0 8398 -40.5 -53.5 23 0.08 293 52 316.1 316.4 316.1
320.4 8839 -43.7 -56.7 22 0.06 300 54 317.6 317.8 317.6
318.0 8890 -44.1 -57.1 22 0.05 301 55 317.8 318.0 317.8
310.0 9060 -44.7 -58.7 19 0.04 304 61 319.2 319.4 319.2
306.1 9144 -43.9 -57.9 20 0.05 305 63 321.5 321.7 321.5
305.0 9169 -43.7 -57.7 20 0.05 303 63 322.1 322.4 322.1
300.0 9280 -43.5 -57.5 20 0.05 295 64 323.9 324.2 323.9
292.0 9462 -43.7 -58.7 17 0.05 293 67 326.2 326.4 326.2
276.0 9838 -47.1 -62.1 16 0.03 290 74 326.6 326.7 326.6
264.0 10132 -47.5 -62.5 16 0.03 288 79 330.1 330.3 330.1
251.0 10464 -49.7 -64.7 16 0.03 285 85 331.7 331.8 331.7
250.0 10490 -49.7 -64.7 16 0.03 285 85 332.1 332.2 332.1
247.0 10569 -48.7 -63.7 16 0.03 283 88 334.7 334.8 334.7
244.0 10649 -48.9 -63.9 16 0.03 280 91 335.6 335.7 335.6
243.3 10668 -48.9 -63.9 16 0.03 280 91 335.8 335.9 335.8
220.0 11327 -50.3 -65.3 15 0.03 280 85 343.5 343.6 343.5
212.0 11569 -50.5 -65.5 15 0.03 280 83 346.8 346.9 346.8
210.0 11631 -49.7 -64.7 16 0.03 280 83 349.0 349.1 349.0
200.0 11950 -49.9 -64.9 15 0.03 280 80 353.6 353.7 353.6
194.0 12149 -49.9 -64.9 15 0.03 279 78 356.7 356.8 356.7
183.0 12529 -51.3 -66.3 15 0.03 278 75 360.4 360.5 360.4
164.0 13233 -55.3 -68.3 18 0.02 277 69 365.2 365.3 365.2
152.0 13716 -56.5 -69.5 18 0.02 275 65 371.1 371.2 371.1
150.0 13800 -57.1 -70.1 18 0.02 275 64 371.5 371.6 371.5
136.0 14414 -60.5 -72.5 19 0.02 268 54 376.0 376.1 376.0
132.0 14600 -60.1 -72.1 19 0.02 265 51 380.0 380.1 380.0
131.4 14630 -60.2 -72.2 19 0.02 265 51 380.3 380.4 380.3
128.0 14792 -60.9 -72.9 19 0.02 266 50 381.9 382.0 381.9
125.0 14939 -60.1 -72.1 19 0.02 268 49 385.9 386.0 385.9
119.0 15240 -62.2 -73.8 20 0.01 270 48 387.4 387.5 387.4
112.0 15616 -64.9 -75.9 21 0.01 265 53 389.3 389.3 389.3
108.0 15838 -64.1 -75.1 21 0.01 265 58 394.8 394.9 394.8
107.8 15850 -64.1 -75.1 21 0.01 265 58 395.0 395.1 395.0
105.0 16010 -64.7 -75.7 21 0.01 272 50 396.9 396.9 396.9
103.0 16128 -62.9 -73.9 21 0.02 277 45 402.5 402.6 402.5
100.0 16310 -62.5 -73.5 21 0.02 285 36 406.7 406.8 406.7
'''
# Parse the data
sound_data = StringIO(data_txt)
p, h, T, Td = np.loadtxt(sound_data, usecols=range(0, 4), unpack=True)
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(6.5875, 6.2125))
ax = fig.add_subplot(111, projection='skewx')
plt.grid(True)
# Plot the data using normal plotting functions, in this case using
    # log scaling in Y, as dictated by the typical meteorological plot
ax.semilogy(T, p, 'r')
ax.semilogy(Td, p, 'g')
# An example of a slanted line at constant X
l = ax.axvline(0, color='b')
# Disables the log-formatting that comes with semilogy
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.set_yticks(np.linspace(100, 1000, 10))
ax.set_ylim(1050, 100)
ax.xaxis.set_major_locator(MultipleLocator(10))
ax.set_xlim(-50, 50)
plt.show()
| mit |